/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "dummy.h"

#include <errno.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "flow.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "reconnect.h"

VLOG_DEFINE_THIS_MODULE(netdev_dummy);

struct reconnect;

struct dummy_packet_stream {
    struct stream *stream;
    struct dp_packet rxbuf;
    struct ovs_list txq;
};

enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};

enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,      /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,  /* Listener not connected. */
    CONN_STATE_UNKNOWN,        /* No relevant information. */
};

struct dummy_packet_pconn {
    struct pstream *pstream;
    struct dummy_packet_stream **streams;
    size_t n_streams;
};

struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};

struct dummy_packet_conn {
    enum dummy_packet_conn_type type;
    union {
        struct dummy_packet_pconn pconn;
        struct dummy_packet_rconn rconn;
    } u;
};

struct pkt_list_node {
    struct dp_packet *pkt;
    struct ovs_list list_node;
};

/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dummy_dev's. */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);

struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    struct eth_addr hwaddr OVS_GUARDED;
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;
    int numa_id OVS_GUARDED;

    struct dummy_packet_conn conn OVS_GUARDED;

    FILE *tx_pcap, *rxq_pcap OVS_GUARDED;

    struct in_addr address, netmask;
    struct in6_addr ipv6, ipv6_mask;
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */

    /* The following properties are for dummy-pmd and they cannot be changed
     * when a device is running, so we remember the request and update them
     * next time netdev_dummy_reconfigure() is called. */
    int requested_n_txq OVS_GUARDED;
    int requested_n_rxq OVS_GUARDED;
    int requested_numa_id OVS_GUARDED;
};
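
/* A sketch of the locking discipline encoded by the annotations above:
 * 'dummy_list_mutex' is always taken before any per-device 'mutex'
 * (OVS_ACQ_AFTER).  A correct walk over all devices therefore looks like:
 *
 *     ovs_mutex_lock(&dummy_list_mutex);
 *     LIST_FOR_EACH (dev, list_node, &dummy_list) {
 *         ovs_mutex_lock(&dev->mutex);
 *         ...read or update per-device state...
 *         ovs_mutex_unlock(&dev->mutex);
 *     }
 *     ovs_mutex_unlock(&dummy_list_mutex);
 *
 * Taking the two locks in the opposite order could deadlock against
 * netdev_dummy_run() and netdev_dummy_wait() below. */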

/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

struct netdev_rxq_dummy {
    struct netdev_rxq up;
    struct ovs_list node;       /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue;
    int recv_queue_len;         /* ovs_list_size(&recv_queue). */
    struct seq *seq;            /* Reports newly queued packets. */
};

static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
                                      struct dp_packet *, int);

static void dummy_packet_stream_close(struct dummy_packet_stream *);

static void pkt_list_delete(struct ovs_list *);

static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}

static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}

static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}

static void
dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
{
    int rxbuf_size = stream ? 2048 : 0;
    s->stream = stream;
    dp_packet_init(&s->rxbuf, rxbuf_size);
    ovs_list_init(&s->txq);
}

static struct dummy_packet_stream *
dummy_packet_stream_create(struct stream *stream)
{
    struct dummy_packet_stream *s;

    s = xzalloc(sizeof *s);
    dummy_packet_stream_init(s, stream);

    return s;
}

static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}

static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}

static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}
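
/* Wire format used by the stream code above: each Ethernet frame crosses the
 * stream prefixed by its length as a 2-byte big-endian integer, so a minimal
 * 60-byte frame travels as 62 bytes:
 *
 *     00 3c <60 bytes of frame data>
 *
 * dummy_packet_stream_run() first reads the 2-byte prefix, then exactly
 * 'frame_len' more bytes, and fails with EPROTO if the prefix is smaller
 * than ETH_HEADER_LEN. */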

static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}

static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}

static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{
    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
    int i;
    struct dummy_packet_pconn *pconn = &conn->u.pconn;
    struct dummy_packet_rconn *rconn = &conn->u.rconn;

    switch (conn->type) {
    case PASSIVE:
        pstream_close(pconn->pstream);
        for (i = 0; i < pconn->n_streams; i++) {
            dummy_packet_stream_close(pconn->streams[i]);
            free(pconn->streams[i]);
        }
        free(pconn->streams);
        pconn->pstream = NULL;
        pconn->streams = NULL;
        break;

    case ACTIVE:
        dummy_packet_stream_close(rconn->rstream);
        free(rconn->rstream);
        rconn->rstream = NULL;
        reconnect_destroy(rconn->reconnect);
        rconn->reconnect = NULL;
        break;

    case NONE:
    default:
        break;
    }

    conn->type = NONE;
    memset(conn, 0, sizeof *conn);
}

static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->u.rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->u.rconn.rstream->stream = NULL;
            break;
        }
    }
}
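
/* Illustrative use of the "stream" and "pstream" keys parsed above; they are
 * normally set through the Interface options column (names and paths here
 * are examples only):
 *
 *     ovs-vsctl set Interface p0 options:pstream=punix:/tmp/p0.sock
 *     ovs-vsctl set Interface p1 options:stream=unix:/tmp/p0.sock
 *
 * This makes p0 a listener and p1 an active peer that reconnects with the
 * backoff configured above (100 ms minimum, effectively unbounded maximum,
 * no probe interval). */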

static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof s));
        s = xmalloc(sizeof *s);
        pconn->streams[pconn->n_streams++] = s;
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; ) {
        struct dummy_packet_stream *s = pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            free(s);
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        } else {
            i++;
        }
    }
}

static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}

static void
dummy_packet_conn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    switch (dev->conn.type) {
    case PASSIVE:
        dummy_pconn_run(dev);
        break;

    case ACTIVE:
        dummy_rconn_run(dev);
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;
    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->u.pconn.pstream);
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->u.pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;
    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->u.pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            pstream_wait(conn->u.pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static enum dummy_netdev_conn_state
dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
{
    enum dummy_netdev_conn_state state;

    if (conn->type == ACTIVE) {
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            state = CONN_STATE_CONNECTED;
        } else {
            state = CONN_STATE_NOT_CONNECTED;
        }
    } else {
        state = CONN_STATE_UNKNOWN;
    }

    return state;
}

static void
netdev_dummy_run(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static void
netdev_dummy_wait(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}

static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->ifindex = -EOPNOTSUPP;
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;
    netdev->numa_id = 0;

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}
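
/* Example of the construction above: 'next_n' starts at 0xaa550000, so the
 * first few dummy devices constructed in a process get the MAC addresses
 * aa:55:aa:55:00:00, aa:55:aa:55:00:01, aa:55:aa:55:00:02, ..., each with
 * MTU 1500, no flags set, and no usable ifindex until one is configured. */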

static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    dummy_packet_conn_close(&netdev->conn);
    netdev->conn.type = NONE;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}

static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}

static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}

static int
netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
                     struct in_addr netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->address = address;
    netdev->netmask = netmask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static int
netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
                     struct in6_addr *mask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->ipv6 = *in6;
    netdev->ipv6_mask = *mask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

#define DUMMY_MAX_QUEUES_PER_PORT 1024

static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
                        char **errp OVS_UNUSED)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;
    int new_n_rxq, new_n_txq, new_numa_id;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    netdev_change_seq_changed(netdev_);

    /* 'dummy-pmd' specific config. */
    if (!netdev_->netdev_class->is_pmd) {
        goto exit;
    }

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);

    if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
        new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
        VLOG_WARN("One or both of interface %s queue counts "
                  "(rxq: %d, txq: %d) exceed the maximum %d; clamping to %d.",
                  netdev_get_name(netdev_),
                  new_n_rxq,
                  new_n_txq,
                  DUMMY_MAX_QUEUES_PER_PORT,
                  DUMMY_MAX_QUEUES_PER_PORT);

        new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
        new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
    }

    new_numa_id = smap_get_int(args, "numa_id", 0);
    if (new_n_rxq != netdev->requested_n_rxq
        || new_n_txq != netdev->requested_n_txq
        || new_numa_id != netdev->requested_numa_id) {
        netdev->requested_n_rxq = new_n_rxq;
        netdev->requested_n_txq = new_n_txq;
        netdev->requested_numa_id = new_numa_id;
        netdev_request_reconfigure(netdev_);
    }

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
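
/* Illustrative use of the pcap keys parsed above (file names are examples
 * only):
 *
 *     ovs-vsctl set Interface p0 options:pcap=p0.pcap
 *     ovs-vsctl set Interface p0 options:rxq_pcap=p0-rx.pcap \
 *                                options:tx_pcap=p0-tx.pcap
 *
 * "pcap" records both directions in a single file; "rxq_pcap" and "tx_pcap"
 * split the capture per direction. */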

static int
netdev_dummy_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    int numa_id = netdev->numa_id;
    ovs_mutex_unlock(&netdev->mutex);

    return numa_id;
}

/* Sets the number of tx queues and rx queues for the dummy PMD interface. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;
    netdev->numa_id = netdev->requested_numa_id;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static struct netdev_rxq *
netdev_dummy_rxq_alloc(void)
{
    struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}

static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}

static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}

static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite
             * - To give valgrind a chance to switch thread.  According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple threads from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&netdev->mutex);

    batch->packets[0] = packet;
    batch->count = 1;
    return 0;
}

static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}

static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}

static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet_batch *batch, bool may_steal,
                  bool concurrent_txq OVS_UNUSED)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;

    struct dp_packet *packet;
    DP_PACKET_BATCH_FOR_EACH(packet, batch) {
        const void *buffer = dp_packet_data(packet);
        size_t size = dp_packet_get_send_len(packet);

        if (packet->packet_type != htonl(PT_ETH)) {
            error = EPFNOSUPPORT;
            break;
        }

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet dp;
            struct flow flow;

            dp_packet_use_const(&dp, buffer, size);
            flow_extract(&dp, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet dp;

            dp_packet_use_const(&dp, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &dp);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    dp_packet_delete_batch(batch, may_steal);

    return error;
}
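
/* Worked example of the size check in netdev_dummy_send(): with the default
 * MTU of 1500, 'max_size' is 1500 + ETH_HEADER_LEN (14) = 1514 bytes, or
 * 1518 bytes if the frame carries an 802.1Q tag (VLAN_HEADER_LEN is 4), so
 * e.g. a 1600-byte frame is rejected with EMSGSIZE. */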

static int
netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

#define DUMMY_MIN_MTU 68
#define DUMMY_MAX_MTU 65535

static int
netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
{
    if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
        return EINVAL;
    }

    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu != mtu) {
        dev->mtu = mtu;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Passing only collected counters */
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_packets = dev->stats.rx_packets;
    stats->rx_bytes = dev->stats.rx_bytes;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
                       unsigned int queue_id, struct smap *details OVS_UNUSED)
{
    if (queue_id == 0) {
        return 0;
    } else {
        return EINVAL;
    }
}

static void
netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
{
    *stats = (struct netdev_queue_stats) {
        .tx_bytes = UINT64_MAX,
        .tx_packets = UINT64_MAX,
        .tx_errors = UINT64_MAX,
        .created = LLONG_MIN,
    };
}

static int
netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
                             unsigned int queue_id,
                             struct netdev_queue_stats *stats)
{
    if (queue_id == 0) {
        netdev_dummy_init_queue_stats(stats);
        return 0;
    } else {
        return EINVAL;
    }
}

struct netdev_dummy_queue_state {
    unsigned int next_queue;
};

static int
netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
                              void **statep)
{
    struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
    state->next_queue = 0;
    *statep = state;
    return 0;
}

static int
netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
                             void *state_,
                             unsigned int *queue_id,
                             struct smap *details OVS_UNUSED)
{
    struct netdev_dummy_queue_state *state = state_;
    if (state->next_queue == 0) {
        *queue_id = 0;
        state->next_queue++;
        return 0;
    } else {
        return EOF;
    }
}

static int
netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                             void *state)
{
    free(state);
    return 0;
}

static int
netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
                              void (*cb)(unsigned int queue_id,
                                         struct netdev_queue_stats *,
                                         void *aux),
                              void *aux)
{
    struct netdev_queue_stats stats;
    netdev_dummy_init_queue_stats(&stats);
    cb(0, &stats, aux);
    return 0;
}

static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->ifindex;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dummy_update_flags__(struct netdev_dummy *netdev,
                            enum netdev_flags off, enum netdev_flags on,
                            enum netdev_flags *old_flagsp)
    OVS_REQUIRES(netdev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = netdev->flags;
    netdev->flags |= on;
    netdev->flags &= ~off;
    if (*old_flagsp != netdev->flags) {
        netdev_change_seq_changed(&netdev->up);
    }

    return 0;
}

static int
netdev_dummy_update_flags(struct netdev *netdev_,
                          enum netdev_flags off, enum netdev_flags on,
                          enum netdev_flags *old_flagsp)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
\f
/* Helper functions. */

#define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE)                  \
{                                                                   \
    NAME,                                                           \
    PMD,                        /* is_pmd */                        \
    NULL,                       /* init */                          \
    netdev_dummy_run,                                               \
    netdev_dummy_wait,                                              \
                                                                    \
    netdev_dummy_alloc,                                             \
    netdev_dummy_construct,                                         \
    netdev_dummy_destruct,                                          \
    netdev_dummy_dealloc,                                           \
    netdev_dummy_get_config,                                        \
    netdev_dummy_set_config,                                        \
    NULL,                       /* get_tunnel_config */             \
    NULL,                       /* build header */                  \
    NULL,                       /* push header */                   \
    NULL,                       /* pop header */                    \
    netdev_dummy_get_numa_id,                                       \
    NULL,                       /* set_tx_multiq */                 \
                                                                    \
    netdev_dummy_send,          /* send */                          \
    NULL,                       /* send_wait */                     \
                                                                    \
    netdev_dummy_set_etheraddr,                                     \
    netdev_dummy_get_etheraddr,                                     \
    netdev_dummy_get_mtu,                                           \
    netdev_dummy_set_mtu,                                           \
    netdev_dummy_get_ifindex,                                       \
    NULL,                       /* get_carrier */                   \
    NULL,                       /* get_carrier_resets */            \
    NULL,                       /* get_miimon */                    \
    netdev_dummy_get_stats,                                         \
                                                                    \
    NULL,                       /* get_features */                  \
    NULL,                       /* set_advertisements */            \
    NULL,                       /* get_pt_mode */                   \
                                                                    \
    NULL,                       /* set_policing */                  \
    NULL,                       /* get_qos_types */                 \
    NULL,                       /* get_qos_capabilities */          \
    NULL,                       /* get_qos */                       \
    NULL,                       /* set_qos */                       \
    netdev_dummy_get_queue,                                         \
    NULL,                       /* set_queue */                     \
    NULL,                       /* delete_queue */                  \
    netdev_dummy_get_queue_stats,                                   \
    netdev_dummy_queue_dump_start,                                  \
    netdev_dummy_queue_dump_next,                                   \
    netdev_dummy_queue_dump_done,                                   \
    netdev_dummy_dump_queue_stats,                                  \
                                                                    \
    NULL,                       /* set_in4 */                       \
    netdev_dummy_get_addr_list,                                     \
    NULL,                       /* add_router */                    \
    NULL,                       /* get_next_hop */                  \
    NULL,                       /* get_status */                    \
    NULL,                       /* arp_lookup */                    \
                                                                    \
    netdev_dummy_update_flags,                                      \
    RECONFIGURE,                                                    \
                                                                    \
    netdev_dummy_rxq_alloc,                                         \
    netdev_dummy_rxq_construct,                                     \
    netdev_dummy_rxq_destruct,                                      \
    netdev_dummy_rxq_dealloc,                                       \
    netdev_dummy_rxq_recv,                                          \
    netdev_dummy_rxq_wait,                                          \
    netdev_dummy_rxq_drain,                                         \
                                                                    \
    NO_OFFLOAD_API                                                  \
}

static const struct netdev_class dummy_class =
    NETDEV_DUMMY_CLASS("dummy", false, NULL);

static const struct netdev_class dummy_internal_class =
    NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);

static const struct netdev_class dummy_pmd_class =
    NETDEV_DUMMY_CLASS("dummy-pmd", true,
                       netdev_dummy_reconfigure);
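
/* Illustrative use of the classes defined above; a dummy PMD port with
 * several queues can be created with, e.g. (names and counts are examples
 * only):
 *
 *     ovs-vsctl add-port br0 p0 -- set Interface p0 type=dummy-pmd \
 *                                  options:n_rxq=4 options:n_txq=4
 */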

static void
pkt_list_delete(struct ovs_list *l)
{
    struct pkt_list_node *pkt;

    LIST_FOR_EACH_POP(pkt, list_node, l) {
        dp_packet_delete(pkt->pkt);
        free(pkt);
    }
}

static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;
    eth_from_hex(s, &packet);
    return packet;
}

static struct dp_packet *
eth_from_flow(const char *s, size_t packet_size)
{
    enum odp_key_fitness fitness;
    struct dp_packet *packet;
    struct ofpbuf odp_key;
    struct flow flow;
    int error;

    /* Convert string to datapath key.
     *
     * It would actually be nicer to parse an OpenFlow-like flow key here, but
     * the code for that currently calls exit() on parse error.  We have to
     * settle for parsing a datapath key for now.
     */
    ofpbuf_init(&odp_key, 0);
    error = odp_flow_from_string(s, NULL, &odp_key, NULL);
    if (error) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    /* Convert odp_key to flow. */
    fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    packet = dp_packet_new(0);
    if (!flow_compose(packet, &flow, packet_size)) {
        dp_packet_delete(packet);
        packet = NULL;
    }

    ofpbuf_uninit(&odp_key);
    return packet;
}
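
/* Example inputs for the two parsers above (all values are illustrative).
 * eth_from_packet() expects the raw frame as a hex string, e.g. a broadcast
 * frame with an ARP ethertype would begin:
 *
 *     "ffffffffffffaa55aa5500000806..."
 *
 * eth_from_flow() expects an ODP flow key such as:
 *
 *     "in_port(1),eth(src=aa:55:aa:55:00:00,dst=ff:ff:ff:ff:ff:ff),
 *      eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,
 *      ttl=64,frag=no)"
 *
 * from which flow_compose() synthesizes a well-formed packet, padded out
 * when a larger 'packet_size' was requested. */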

static void
netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
{
    struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);

    pkt_node->pkt = packet;
    ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
    rx->recv_queue_len++;
    seq_change(rx->seq);
}

static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
                          int queue_id)
    OVS_REQUIRES(dummy->mutex)
{
    struct netdev_rxq_dummy *rx, *prev;

    if (dummy->rxq_pcap) {
        ovs_pcap_write(dummy->rxq_pcap, packet);
        fflush(dummy->rxq_pcap);
    }
    prev = NULL;
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->up.queue_id == queue_id &&
            rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        dp_packet_delete(packet);
    }
}
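
/* Note on the fan-out in netdev_dummy_queue_packet(): every rx queue whose
 * 'queue_id' matches and that still has capacity receives the packet.  All
 * matching queues but the last get a clone, the last one takes ownership of
 * the original, and if nothing matches the packet is freed.  This costs
 * exactly one packet allocation per receiving queue. */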

static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i, k = 1, rx_qid = 0;

    netdev = netdev_from_name(argv[k++]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit_netdev;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dummy_dev->mutex);

    if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
        rx_qid = strtol(argv[k + 1], NULL, 10);
        if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
            unixctl_command_reply_error(conn, "bad rx queue id.");
            goto exit;
        }
        k += 2;
    }

    for (i = k; i < argc; i++) {
        struct dp_packet *packet;

        /* Try to parse 'argv[i]' as packet in hex. */
        packet = eth_from_packet(argv[i]);

        if (!packet) {
            int packet_size = 0;
            const char *flow_str = argv[i];

            /* Parse optional --len argument that immediately follows a
             * 'flow'. */
            if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
                packet_size = strtol(argv[i + 2], NULL, 10);

                if (packet_size < ETH_TOTAL_MIN) {
                    unixctl_command_reply_error(conn, "too small packet len");
                    goto exit;
                }
                i += 2;
            }
            /* Try to parse 'argv[i]' as an odp flow. */
            packet = eth_from_flow(flow_str, packet_size);

            if (!packet) {
                unixctl_command_reply_error(conn, "bad packet or flow syntax");
                goto exit;
            }
        }

        netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
    }

    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dummy_dev->mutex);
exit_netdev:
    netdev_close(netdev);
}

static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
                             const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            netdev_dummy_set_admin_state__(dummy_dev, up);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Unknown Dummy Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dummy_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

static void
display_conn_state__(struct ds *s, const char *name,
                     enum dummy_netdev_conn_state state)
{
    ds_put_format(s, "%s: ", name);

    switch (state) {
    case CONN_STATE_CONNECTED:
        ds_put_cstr(s, "connected\n");
        break;

    case CONN_STATE_NOT_CONNECTED:
        ds_put_cstr(s, "disconnected\n");
        break;

    case CONN_STATE_UNKNOWN:
    default:
        ds_put_cstr(s, "unknown\n");
        break;
    }
}

static void
netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
                        const char *argv[], void *aux OVS_UNUSED)
{
    enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
    struct ds s;

    ds_init(&s);

    if (argc > 1) {
        const char *dev_name = argv[1];
        struct netdev *netdev = netdev_from_name(dev_name);

        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            state = dummy_netdev_get_conn_state(&dummy_dev->conn);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        }
        display_conn_state__(&s, dev_name, state);
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            state = dummy_netdev_get_conn_state(&netdev->conn);
            ovs_mutex_unlock(&netdev->mutex);
            if (state != CONN_STATE_UNKNOWN) {
                display_conn_state__(&s, netdev->up.name, state);
            }
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }

    unixctl_command_reply(conn, ds_cstr(&s));
    ds_destroy(&s);
}

static void
netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in_addr ip, mask;
        char *error;

        error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
        if (!error) {
            netdev_dummy_set_in4(netdev, ip, mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}

static void
netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in6_addr ip6;
        char *error;
        uint32_t plen;

        error = ipv6_parse_cidr(argv[2], &ip6, &plen);
        if (!error) {
            struct in6_addr mask;

            mask = ipv6_create_mask(plen);
            netdev_dummy_set_in6(netdev, &ip6, &mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}


static void
netdev_dummy_override(const char *type)
{
    if (!netdev_unregister_provider(type)) {
        struct netdev_class *class;
        int error;

        class = xmemdup(&dummy_class, sizeof dummy_class);
        class->type = xstrdup(type);
        error = netdev_register_provider(class);
        if (error) {
            VLOG_ERR("%s: failed to register netdev provider (%s)",
                     type, ovs_strerror(error));
            free(CONST_CAST(char *, class->type));
            free(class);
        }
    }
}

void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive",
                             "name [--qid queue_id] packet|flow [--len packet_len]",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);
    unixctl_command_register("netdev-dummy/ip6addr",
                             "[netdev] ip6addr", 2, 2,
                             netdev_dummy_ip6addr, NULL);

    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);
    netdev_register_provider(&dummy_internal_class);
    netdev_register_provider(&dummy_pmd_class);

    netdev_vport_tunnel_register();
}
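
/* Illustrative invocations of the unixctl commands registered above (device
 * names, addresses, and packet contents are examples only):
 *
 *     ovs-appctl netdev-dummy/set-admin-state p0 down
 *     ovs-appctl netdev-dummy/ip4addr p0 10.0.0.1/24
 *     ovs-appctl netdev-dummy/conn-state p0
 *     ovs-appctl netdev-dummy/receive p0 \
 *         'in_port(1),eth(src=aa:55:aa:55:00:01,dst=aa:55:aa:55:00:00),
 *          eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,
 *          ttl=64,frag=no)' --len 64
 */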