]> git.proxmox.com Git - mirror_ovs.git/blob - lib/netdev-dummy.c
ofp-actions: Add truncate action.
[mirror_ovs.git] / lib / netdev-dummy.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22 #include <unistd.h>
23
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
26 #include "flow.h"
27 #include "netdev-provider.h"
28 #include "netdev-vport.h"
29 #include "odp-util.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "openvswitch/list.h"
32 #include "openvswitch/ofp-print.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "packets.h"
37 #include "pcap-file.h"
38 #include "poll-loop.h"
39 #include "shash.h"
40 #include "sset.h"
41 #include "stream.h"
42 #include "unaligned.h"
43 #include "timeval.h"
44 #include "unixctl.h"
45 #include "reconnect.h"
46
47 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
48
49 struct reconnect;
50
/* One bidirectional packet stream: the underlying OVS stream plus a
 * reassembly buffer for a partially received frame and a queue of frames
 * waiting to be transmitted. */
struct dummy_packet_stream {
    struct stream *stream;      /* Underlying connection, or NULL. */
    struct dp_packet rxbuf;     /* Partially received frame: 2-byte length
                                 * header followed by payload bytes. */
    struct ovs_list txq;        /* Queue of "struct pkt_list_node" to send. */
};
56
/* How a dummy device's packet connection is configured. */
enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};
62
/* Observable connection state for an ACTIVE connection (see
 * dummy_netdev_get_conn_state()). */
enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,      /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,  /* Listener not connected. */
    CONN_STATE_UNKNOWN,        /* No relevant information. */
};
68
/* Passive (listening) side of a packet connection. */
struct dummy_packet_pconn {
    struct pstream *pstream;              /* Listener. */
    struct dummy_packet_stream *streams;  /* Accepted streams (array). */
    size_t n_streams;                     /* Elements in 'streams'. */
};
74
/* Active (connecting) side of a packet connection. */
struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;  /* The single outgoing stream. */
    struct reconnect *reconnect;          /* Reconnection state machine. */
};
79
/* A dummy device's packet connection; 'type' selects which union member
 * is valid (neither is valid when 'type' is NONE). */
struct dummy_packet_conn {
    enum dummy_packet_conn_type type;
    union {
        struct dummy_packet_pconn pconn;
        struct dummy_packet_rconn rconn;
    } u;
};
87
/* Wrapper that lets a dp_packet sit on an ovs_list. */
struct pkt_list_node {
    struct dp_packet *pkt;        /* Owned packet. */
    struct ovs_list list_node;    /* In a txq or recv_queue list. */
};
92
/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dummy's (linked through their 'list_node'). */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);
99
struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    struct eth_addr hwaddr OVS_GUARDED;   /* MAC address. */
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;              /* Negative errno if unset. */

    struct dummy_packet_conn conn OVS_GUARDED;

    /* Optional pcap capture of transmitted/received traffic; may point to
     * the same FILE when a single "pcap" file is configured. */
    FILE *tx_pcap, *rxq_pcap OVS_GUARDED;

    struct in_addr address, netmask;      /* Assigned IPv4 address/mask. */
    struct in6_addr ipv6, ipv6_mask;      /* Assigned IPv6 address/mask. */
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */

    /* The following properties are for dummy-pmd and they cannot be changed
     * when a device is running, so we remember the request and update them
     * next time netdev_dummy_reconfigure() is called. */
    int requested_n_txq;
    int requested_n_rxq;
};
129
130 /* Max 'recv_queue_len' in struct netdev_dummy. */
131 #define NETDEV_DUMMY_MAX_QUEUE 100
132
/* One receive queue of a dummy device. */
struct netdev_rxq_dummy {
    struct netdev_rxq up;
    struct ovs_list node;       /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue; /* Queued "struct pkt_list_node"s. */
    int recv_queue_len;         /* ovs_list_size(&recv_queue). */
    struct seq *seq;            /* Reports newly queued packets. */
};
140
141 static unixctl_cb_func netdev_dummy_set_admin_state;
142 static int netdev_dummy_construct(struct netdev *);
143 static void netdev_dummy_queue_packet(struct netdev_dummy *,
144 struct dp_packet *, int);
145
146 static void dummy_packet_stream_close(struct dummy_packet_stream *);
147
148 static void pkt_list_delete(struct ovs_list *);
149
/* Returns true if 'class' is the dummy netdev class, identified by its
 * construct callback being netdev_dummy_construct(). */
static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}
155
/* Downcasts 'netdev' to its enclosing netdev_dummy.  Asserts that 'netdev'
 * really belongs to the dummy class. */
static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}
162
/* Downcasts 'rx' to its enclosing netdev_rxq_dummy.  Asserts that the
 * owning netdev belongs to the dummy class. */
static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}
169
170 static void
171 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
172 {
173 int rxbuf_size = stream ? 2048 : 0;
174 s->stream = stream;
175 dp_packet_init(&s->rxbuf, rxbuf_size);
176 ovs_list_init(&s->txq);
177 }
178
179 static struct dummy_packet_stream *
180 dummy_packet_stream_create(struct stream *stream)
181 {
182 struct dummy_packet_stream *s;
183
184 s = xzalloc(sizeof *s);
185 dummy_packet_stream_init(s, stream);
186
187 return s;
188 }
189
/* Arranges for the poll loop to wake up when 's' can make progress:
 * always for stream housekeeping and receive, and additionally for send
 * when there is queued output. */
static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}
199
/* Queues a copy of the 'size' bytes at 'buffer' for transmission on 's',
 * prefixed with a 2-byte network-order length header.  Silently drops the
 * frame when the transmit queue is already at NETDEV_DUMMY_MAX_QUEUE. */
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        /* Clone with 2 bytes of headroom for the length prefix. */
        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}
215
/* Makes one pass of progress on 's': flushes queued output and reads as
 * much of the next length-prefixed frame as is available, queuing each
 * complete frame onto 'dev' as a received packet.
 *
 * Returns 0 on success, EOF when the peer closed the connection cleanly,
 * or a positive errno value (e.g. EPROTO for a malformed frame). */
static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    /* Try to send the frame at the head of the transmit queue. */
    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            /* Partial sends leave the remainder queued for next time. */
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    /* Figure out how many bytes we still need: first the 2-byte length
     * header, then the frame body it announces. */
    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                /* A frame shorter than an Ethernet header is bogus. */
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            /* Got the whole frame: strip the header and hand a copy to the
             * device's receive path, then reset for the next frame. */
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            /* Mid-frame EOF is a protocol error; clean EOF is just EOF. */
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}
283
/* Releases all resources owned by 's' (stream, receive buffer, and any
 * queued transmit frames).  Does not free 's' itself. */
static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}
291
292 static void
293 dummy_packet_conn_init(struct dummy_packet_conn *conn)
294 {
295 memset(conn, 0, sizeof *conn);
296 conn->type = NONE;
297 }
298
/* Adds 'conn''s configuration to 'args' as either a "pstream" or "stream"
 * key, mirroring the keys accepted by dummy_packet_conn_set_config(). */
static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{

    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}
317
/* Tears down whatever connection 'conn' currently holds (listener plus all
 * accepted streams, or the active stream and its reconnect FSM) and leaves
 * 'conn' zeroed with type NONE. */
static void
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
    int i;
    struct dummy_packet_pconn *pconn = &conn->u.pconn;
    struct dummy_packet_rconn *rconn = &conn->u.rconn;

    switch (conn->type) {
    case PASSIVE:
        pstream_close(pconn->pstream);
        for (i = 0; i < pconn->n_streams; i++) {
            dummy_packet_stream_close(&pconn->streams[i]);
        }
        free(pconn->streams);
        pconn->pstream = NULL;
        pconn->streams = NULL;
        break;

    case ACTIVE:
        dummy_packet_stream_close(rconn->rstream);
        free(rconn->rstream);
        rconn->rstream = NULL;
        reconnect_destroy(rconn->reconnect);
        rconn->reconnect = NULL;
        break;

    case NONE:
    default:
        break;
    }

    conn->type = NONE;
    memset(conn, 0, sizeof *conn);
}
352
/* Reconfigures 'conn' from 'args'.  Exactly one of the "pstream" (listen)
 * or "stream" (connect) keys may be given; supplying both is rejected with
 * a warning.  If the requested endpoint matches the current configuration
 * this is a no-op; otherwise the old connection is closed first. */
static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        /* Unchanged listener: nothing to do. */
        if (pstream &&
            !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        /* Unchanged target: nothing to do. */
        if (stream &&
            !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        /* Set up the reconnect FSM before attempting the first connect so
         * that failures feed straight into its backoff logic. */
        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->u.rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->u.rconn.rstream->stream = NULL;
            break;
        }
    }
}
431
/* Runs one iteration of 'dev''s passive connection: accepts at most one new
 * stream, then gives every accepted stream a chance to make progress,
 * closing and compacting away any stream that fails. */
static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof *s));
        s = &pconn->streams[pconn->n_streams++];
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        /* A hard accept error shuts the listener down entirely. */
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; i++) {
        struct dummy_packet_stream *s = &pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            /* Swap-remove; the stream moved into slot 'i' is not run until
             * the next call. */
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        }
    }
}
471
/* Runs one iteration of 'dev''s active connection: advances the reconnect
 * FSM (opening or re-opening the stream as it directs) and, once connected,
 * lets the stream make progress.  A stream failure feeds back into the FSM
 * so a reconnection attempt is scheduled. */
static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                /* A connect is already in flight; poll its completion. */
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}
526
527 static void
528 dummy_packet_conn_run(struct netdev_dummy *dev)
529 OVS_REQUIRES(dev->mutex)
530 {
531 switch (dev->conn.type) {
532 case PASSIVE:
533 dummy_pconn_run(dev);
534 break;
535
536 case ACTIVE:
537 dummy_rconn_run(dev);
538 break;
539
540 case NONE:
541 default:
542 break;
543 }
544 }
545
/* Arranges for the poll loop to wake up when 'conn' can make progress:
 * the listener and all accepted streams for PASSIVE, or the single stream
 * (only while connected) for ACTIVE. */
static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;
    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->u.pconn.pstream);
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;
    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
569
/* Queues the 'size' bytes at 'buffer' for transmission on every stream of
 * 'conn': all accepted streams for PASSIVE, or the single stream (only
 * while connected) for ACTIVE.  A NONE connection drops the data. */
static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            pstream_wait(conn->u.pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
598
599 static enum dummy_netdev_conn_state
600 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
601 {
602 enum dummy_netdev_conn_state state;
603
604 if (conn->type == ACTIVE) {
605 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
606 state = CONN_STATE_CONNECTED;
607 } else {
608 state = CONN_STATE_NOT_CONNECTED;
609 }
610 } else {
611 state = CONN_STATE_UNKNOWN;
612 }
613
614 return state;
615 }
616
/* Global run callback: advances every dummy device's packet connection,
 * taking each device's own mutex in turn under the list mutex. */
static void
netdev_dummy_run(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
630
/* Global wait callback: registers poll-loop wakeups for every dummy
 * device's packet connection. */
static void
netdev_dummy_wait(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
644
645 static struct netdev *
646 netdev_dummy_alloc(void)
647 {
648 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
649 return &netdev->up;
650 }
651
/* Constructs 'netdev_': assigns a unique aa:55:xx:xx:xx:xx MAC from a
 * shared counter, sets defaults (MTU 1500, no flags, no ifindex), and adds
 * the device to the global dummy_list.  Always returns 0. */
static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->ifindex = -EOPNOTSUPP;    /* "Not supported" until configured. */
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}
686
/* Destructs 'netdev_': removes it from the global list, closes its packet
 * connection, and destroys its mutex.  Memory is released separately by
 * netdev_dummy_dealloc(). */
static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    dummy_packet_conn_close(&netdev->conn);
    netdev->conn.type = NONE;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}
703
/* Frees the storage allocated by netdev_dummy_alloc(). */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    free(netdev_dummy_cast(netdev_));
}
711
/* Reports 'dev''s configuration into 'args': ifindex (when set), the packet
 * connection endpoint, and — for dummy-pmd devices only — the requested and
 * configured rx/tx queue counts.  Always returns 0. */
static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
738
/* Returns the device's assigned addresses as freshly allocated arrays of
 * IPv6 addresses (an IPv4 address is returned v4-mapped).  On success the
 * caller owns '*paddr' and '*pmask'; if 'paddr' is NULL the arrays are
 * freed internally.  Returns EADDRNOTAVAIL when no address is assigned. */
static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    /* Count the assigned addresses first so the arrays can be sized. */
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}
785
786 static int
787 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
788 struct in_addr netmask)
789 {
790 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
791
792 ovs_mutex_lock(&netdev->mutex);
793 netdev->address = address;
794 netdev->netmask = netmask;
795 netdev_change_seq_changed(netdev_);
796 ovs_mutex_unlock(&netdev->mutex);
797
798 return 0;
799 }
800
801 static int
802 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
803 struct in6_addr *mask)
804 {
805 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
806
807 ovs_mutex_lock(&netdev->mutex);
808 netdev->ipv6 = *in6;
809 netdev->ipv6_mask = *mask;
810 netdev_change_seq_changed(netdev_);
811 ovs_mutex_unlock(&netdev->mutex);
812
813 return 0;
814 }
815
/* Applies 'args' to the device: ifindex, packet connection, pcap capture
 * files ("pcap" for a shared rx/tx file, or separate "rxq_pcap"/"tx_pcap"),
 * and — for dummy-pmd devices — the requested rx queue count, triggering a
 * reconfigure when it changes.  Always returns 0. */
static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;
    int new_n_rxq;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    /* Close any previously opened capture files (once each, since both
     * pointers may reference the same FILE). */
    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    netdev_change_seq_changed(netdev_);

    /* 'dummy-pmd' specific config. */
    if (!netdev_->netdev_class->is_pmd) {
        goto exit;
    }

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", netdev->requested_n_rxq), 1);
    if (new_n_rxq != netdev->requested_n_rxq) {
        netdev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(netdev_);
    }

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
867
/* Dummy devices always report NUMA node 0. */
static int
netdev_dummy_get_numa_id(const struct netdev *netdev_ OVS_UNUSED)
{
    return 0;
}
873
874 /* Requests the number of tx queues for the dummy PMD interface. */
875 static int
876 netdev_dummy_set_tx_multiq(struct netdev *netdev_, unsigned int n_txq)
877 {
878 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
879
880 ovs_mutex_lock(&netdev->mutex);
881
882 if (netdev_->n_txq == n_txq) {
883 goto out;
884 }
885
886 netdev->requested_n_txq = n_txq;
887 netdev_request_reconfigure(netdev_);
888
889 out:
890 ovs_mutex_unlock(&netdev->mutex);
891 return 0;
892 }
893
/* Sets the number of tx queues and rx queues for the dummy PMD interface.
 * Applies the previously recorded queue-count requests.  Always returns 0. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
908
909 static struct netdev_rxq *
910 netdev_dummy_rxq_alloc(void)
911 {
912 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
913 return &rx->up;
914 }
915
/* Constructs 'rxq_': links it into its device's 'rxes' list and sets up an
 * empty receive queue with a seq for wakeups.  Always returns 0. */
static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
931
/* Destructs 'rxq_': unlinks it from its device, drops any queued packets,
 * and destroys its wakeup seq. */
static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}
944
/* Frees the storage allocated by netdev_dummy_rxq_alloc(). */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    free(netdev_rxq_dummy_cast(rxq_));
}
952
/* Receives at most one packet from 'rxq_' into 'arr', setting '*c' to 1.
 * Returns EAGAIN when the queue is empty; otherwise pads the packet to
 * minimum Ethernet size, updates rx statistics, and returns 0. */
static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
                      int *c)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite
             * - To give valgrind a chance to switch thread. According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple thread from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&netdev->mutex);

    dp_packet_pad(packet);

    arr[0] = packet;
    *c = 1;
    return 0;
}
1002
/* Arranges a wakeup for 'rxq_': immediately if packets are already queued,
 * otherwise when the queue's seq next changes (a new packet arrives). */
static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}
1018
/* Discards all packets queued on 'rxq_' and signals the change via the
 * queue's seq.  Always returns 0. */
static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}
1034
/* Transmits 'cnt' packets on 'netdev': validates each frame's size against
 * ETH_HEADER_LEN and the MTU (plus a VLAN header when tagged), forwards it
 * to the packet connection, answers ARP requests for the device's own IPv4
 * address, and writes to the tx pcap file if configured.  When 'may_steal'
 * is true, all packets are freed regardless of errors.  Returns 0 on
 * success or EMSGSIZE for the first bad-sized packet. */
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;
    int i;

    for (i = 0; i < cnt; i++) {
        const void *buffer = dp_packet_data(pkts[i]);
        size_t size = dp_packet_size(pkts[i]);

        /* Honor any truncate action applied to the packet. */
        size -= dp_packet_get_cutlen(pkts[i]);

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            /* A VLAN tag legitimately adds 4 bytes beyond the MTU. */
            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet packet;
            struct flow flow;

            dp_packet_use_const(&packet, buffer, size);
            flow_extract(&packet, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet packet;

            dp_packet_use_const(&packet, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &packet);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }

    return error;
}
1111
/* Sets the device MAC address to 'mac', signaling a change only when the
 * address actually differs.  Always returns 0. */
static int
netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1126
1127 static int
1128 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1129 {
1130 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1131
1132 ovs_mutex_lock(&dev->mutex);
1133 *mac = dev->hwaddr;
1134 ovs_mutex_unlock(&dev->mutex);
1135
1136 return 0;
1137 }
1138
1139 static int
1140 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1141 {
1142 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1143
1144 ovs_mutex_lock(&dev->mutex);
1145 *mtup = dev->mtu;
1146 ovs_mutex_unlock(&dev->mutex);
1147
1148 return 0;
1149 }
1150
/* Sets the device MTU to 'mtu' unconditionally.  Always returns 0. */
static int
netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->mtu = mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1162
/* Copies the counters this driver actually maintains (tx/rx packets and
 * bytes) into '*stats'; other fields are left untouched by this function.
 * Always returns 0. */
static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Passing only collected counters */
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_packets = dev->stats.rx_packets;
    stats->rx_bytes = dev->stats.rx_bytes;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1178
1179 static int
1180 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1181 unsigned int queue_id, struct smap *details OVS_UNUSED)
1182 {
1183 if (queue_id == 0) {
1184 return 0;
1185 } else {
1186 return EINVAL;
1187 }
1188 }
1189
1190 static void
1191 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1192 {
1193 *stats = (struct netdev_queue_stats) {
1194 .tx_bytes = UINT64_MAX,
1195 .tx_packets = UINT64_MAX,
1196 .tx_errors = UINT64_MAX,
1197 .created = LLONG_MIN,
1198 };
1199 }
1200
1201 static int
1202 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1203 unsigned int queue_id,
1204 struct netdev_queue_stats *stats)
1205 {
1206 if (queue_id == 0) {
1207 netdev_dummy_init_queue_stats(stats);
1208 return 0;
1209 } else {
1210 return EINVAL;
1211 }
1212 }
1213
/* Cursor for the queue dump: the id of the next queue to report. */
struct netdev_dummy_queue_state {
    unsigned int next_queue;
};
1217
1218 static int
1219 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1220 void **statep)
1221 {
1222 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1223 state->next_queue = 0;
1224 *statep = state;
1225 return 0;
1226 }
1227
1228 static int
1229 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1230 void *state_,
1231 unsigned int *queue_id,
1232 struct smap *details OVS_UNUSED)
1233 {
1234 struct netdev_dummy_queue_state *state = state_;
1235 if (state->next_queue == 0) {
1236 *queue_id = 0;
1237 state->next_queue++;
1238 return 0;
1239 } else {
1240 return EOF;
1241 }
1242 }
1243
/* Frees the iterator allocated by netdev_dummy_queue_dump_start().
 * Never fails. */
static int
netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                             void *state)
{
    free(state);
    return 0;
}
1251
/* Invokes 'cb' exactly once, for queue 0 (the only dummy queue), passing
 * sentinel "unavailable" statistics.  Never fails. */
static int
netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
                              void (*cb)(unsigned int queue_id,
                                         struct netdev_queue_stats *,
                                         void *aux),
                              void *aux)
{
    struct netdev_queue_stats stats;
    netdev_dummy_init_queue_stats(&stats);
    cb(0, &stats, aux);
    return 0;
}
1264
1265 static int
1266 netdev_dummy_get_ifindex(const struct netdev *netdev)
1267 {
1268 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1269 int ifindex;
1270
1271 ovs_mutex_lock(&dev->mutex);
1272 ifindex = dev->ifindex;
1273 ovs_mutex_unlock(&dev->mutex);
1274
1275 return ifindex;
1276 }
1277
1278 static int
1279 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1280 enum netdev_flags off, enum netdev_flags on,
1281 enum netdev_flags *old_flagsp)
1282 OVS_REQUIRES(netdev->mutex)
1283 {
1284 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1285 return EINVAL;
1286 }
1287
1288 *old_flagsp = netdev->flags;
1289 netdev->flags |= on;
1290 netdev->flags &= ~off;
1291 if (*old_flagsp != netdev->flags) {
1292 netdev_change_seq_changed(&netdev->up);
1293 }
1294
1295 return 0;
1296 }
1297
1298 static int
1299 netdev_dummy_update_flags(struct netdev *netdev_,
1300 enum netdev_flags off, enum netdev_flags on,
1301 enum netdev_flags *old_flagsp)
1302 {
1303 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1304 int error;
1305
1306 ovs_mutex_lock(&netdev->mutex);
1307 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1308 ovs_mutex_unlock(&netdev->mutex);
1309
1310 return error;
1311 }
1312 \f
1313 /* Helper functions. */
1314
/* Expands to a "struct netdev_class" initializer for a dummy device class
 * named NAME.  PMD supplies the is_pmd flag; TX_MULTIQ and RECONFIGURE
 * supply the set_tx_multiq and reconfigure callbacks (NULL when
 * unsupported).  The last parameter was previously misspelled
 * "RECOFIGURE"; fixed here (macro-internal name, callers unaffected). */
#define NETDEV_DUMMY_CLASS(NAME, PMD, TX_MULTIQ, RECONFIGURE) \
{                                                       \
    NAME,                                               \
    PMD,                        /* is_pmd */            \
    NULL,                       /* init */              \
    netdev_dummy_run,                                   \
    netdev_dummy_wait,                                  \
                                                        \
    netdev_dummy_alloc,                                 \
    netdev_dummy_construct,                             \
    netdev_dummy_destruct,                              \
    netdev_dummy_dealloc,                               \
    netdev_dummy_get_config,                            \
    netdev_dummy_set_config,                            \
    NULL,                       /* get_tunnel_config */ \
    NULL,                       /* build header */      \
    NULL,                       /* push header */       \
    NULL,                       /* pop header */        \
    netdev_dummy_get_numa_id,                           \
    TX_MULTIQ,                                          \
                                                        \
    netdev_dummy_send,          /* send */              \
    NULL,                       /* send_wait */         \
                                                        \
    netdev_dummy_set_etheraddr,                         \
    netdev_dummy_get_etheraddr,                         \
    netdev_dummy_get_mtu,                               \
    netdev_dummy_set_mtu,                               \
    netdev_dummy_get_ifindex,                           \
    NULL,                       /* get_carrier */       \
    NULL,                       /* get_carrier_resets */ \
    NULL,                       /* get_miimon */        \
    netdev_dummy_get_stats,                             \
                                                        \
    NULL,                       /* get_features */      \
    NULL,                       /* set_advertisements */ \
                                                        \
    NULL,                       /* set_policing */      \
    NULL,                       /* get_qos_types */     \
    NULL,                       /* get_qos_capabilities */ \
    NULL,                       /* get_qos */           \
    NULL,                       /* set_qos */           \
    netdev_dummy_get_queue,                             \
    NULL,                       /* set_queue */         \
    NULL,                       /* delete_queue */      \
    netdev_dummy_get_queue_stats,                       \
    netdev_dummy_queue_dump_start,                      \
    netdev_dummy_queue_dump_next,                       \
    netdev_dummy_queue_dump_done,                       \
    netdev_dummy_dump_queue_stats,                      \
                                                        \
    NULL,                       /* set_in4 */           \
    netdev_dummy_get_addr_list,                         \
    NULL,                       /* add_router */        \
    NULL,                       /* get_next_hop */      \
    NULL,                       /* get_status */        \
    NULL,                       /* arp_lookup */        \
                                                        \
    netdev_dummy_update_flags,                          \
    RECONFIGURE,                                        \
                                                        \
    netdev_dummy_rxq_alloc,                             \
    netdev_dummy_rxq_construct,                         \
    netdev_dummy_rxq_destruct,                          \
    netdev_dummy_rxq_dealloc,                           \
    netdev_dummy_rxq_recv,                              \
    netdev_dummy_rxq_wait,                              \
    netdev_dummy_rxq_drain,                             \
}
1384
/* Plain dummy class: not a PMD device; no multiqueue tx, no reconfigure. */
static const struct netdev_class dummy_class =
    NETDEV_DUMMY_CLASS("dummy", false, NULL, NULL);

/* Poll-mode ("pmd") flavor of the dummy class, with multiqueue tx and
 * reconfiguration support for exercising the userspace datapath. */
static const struct netdev_class dummy_pmd_class =
    NETDEV_DUMMY_CLASS("dummy-pmd", true,
                       netdev_dummy_set_tx_multiq,
                       netdev_dummy_reconfigure);
1392
1393 static void
1394 pkt_list_delete(struct ovs_list *l)
1395 {
1396 struct pkt_list_node *pkt;
1397
1398 LIST_FOR_EACH_POP(pkt, list_node, l) {
1399 dp_packet_delete(pkt->pkt);
1400 free(pkt);
1401 }
1402 }
1403
1404 static struct dp_packet *
1405 eth_from_packet_or_flow(const char *s)
1406 {
1407 enum odp_key_fitness fitness;
1408 struct dp_packet *packet;
1409 struct ofpbuf odp_key;
1410 struct flow flow;
1411 int error;
1412
1413 if (!eth_from_hex(s, &packet)) {
1414 return packet;
1415 }
1416
1417 /* Convert string to datapath key.
1418 *
1419 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1420 * the code for that currently calls exit() on parse error. We have to
1421 * settle for parsing a datapath key for now.
1422 */
1423 ofpbuf_init(&odp_key, 0);
1424 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1425 if (error) {
1426 ofpbuf_uninit(&odp_key);
1427 return NULL;
1428 }
1429
1430 /* Convert odp_key to flow. */
1431 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1432 if (fitness == ODP_FIT_ERROR) {
1433 ofpbuf_uninit(&odp_key);
1434 return NULL;
1435 }
1436
1437 packet = dp_packet_new(0);
1438 flow_compose(packet, &flow);
1439
1440 ofpbuf_uninit(&odp_key);
1441 return packet;
1442 }
1443
1444 static void
1445 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1446 {
1447 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1448
1449 pkt_node->pkt = packet;
1450 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1451 rx->recv_queue_len++;
1452 seq_change(rx->seq);
1453 }
1454
/* Delivers 'packet' to every rx queue of 'dummy' that polls 'queue_id' and
 * still has room.  Takes ownership of 'packet': the last matching queue
 * receives 'packet' itself, earlier ones receive clones, and if no queue
 * matches the packet is freed. */
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
                          int queue_id)
    OVS_REQUIRES(dummy->mutex)
{
    struct netdev_rxq_dummy *rx, *prev;

    /* Mirror the packet into the rx pcap trace file, if one is configured. */
    if (dummy->rxq_pcap) {
        ovs_pcap_write(dummy->rxq_pcap, packet);
        fflush(dummy->rxq_pcap);
    }
    prev = NULL;
    /* 'prev' trails one matching receiver behind the loop so that only the
     * final receiver takes ownership of 'packet'; earlier ones get clones. */
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->up.queue_id == queue_id &&
            rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        /* No receiver could take the packet; drop it. */
        dp_packet_delete(packet);
    }
}
1482
/* unixctl handler for "netdev-dummy/receive": injects one or more packets,
 * each given as a hex string or an ODP flow description, into the named
 * dummy netdev's receive path, optionally on a specific rx queue given by
 * "--qid <n>". */
static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i, k = 1, rx_qid = 0;   /* 'k' walks argv; rx queue defaults to 0. */

    netdev = netdev_from_name(argv[k++]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit_netdev;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dummy_dev->mutex);

    /* Optional "--qid <n>" pair; both tokens must be present. */
    if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
        rx_qid = strtol(argv[k + 1], NULL, 10);
        if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
            unixctl_command_reply_error(conn, "bad rx queue id.");
            goto exit;
        }
        k += 2;
    }

    /* Each remaining argument is one packet to queue, in order. */
    for (i = k; i < argc; i++) {
        struct dp_packet *packet;

        packet = eth_from_packet_or_flow(argv[i]);
        if (!packet) {
            unixctl_command_reply_error(conn, "bad packet syntax");
            goto exit;
        }

        netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
    }

    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dummy_dev->mutex);
exit_netdev:
    netdev_close(netdev);
}
1528
1529 static void
1530 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1531 OVS_REQUIRES(dev->mutex)
1532 {
1533 enum netdev_flags old_flags;
1534
1535 if (admin_state) {
1536 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1537 } else {
1538 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1539 }
1540 }
1541
1542 static void
1543 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1544 const char *argv[], void *aux OVS_UNUSED)
1545 {
1546 bool up;
1547
1548 if (!strcasecmp(argv[argc - 1], "up")) {
1549 up = true;
1550 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1551 up = false;
1552 } else {
1553 unixctl_command_reply_error(conn, "Invalid Admin State");
1554 return;
1555 }
1556
1557 if (argc > 2) {
1558 struct netdev *netdev = netdev_from_name(argv[1]);
1559 if (netdev && is_dummy_class(netdev->netdev_class)) {
1560 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1561
1562 ovs_mutex_lock(&dummy_dev->mutex);
1563 netdev_dummy_set_admin_state__(dummy_dev, up);
1564 ovs_mutex_unlock(&dummy_dev->mutex);
1565
1566 netdev_close(netdev);
1567 } else {
1568 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1569 netdev_close(netdev);
1570 return;
1571 }
1572 } else {
1573 struct netdev_dummy *netdev;
1574
1575 ovs_mutex_lock(&dummy_list_mutex);
1576 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1577 ovs_mutex_lock(&netdev->mutex);
1578 netdev_dummy_set_admin_state__(netdev, up);
1579 ovs_mutex_unlock(&netdev->mutex);
1580 }
1581 ovs_mutex_unlock(&dummy_list_mutex);
1582 }
1583 unixctl_command_reply(conn, "OK");
1584 }
1585
1586 static void
1587 display_conn_state__(struct ds *s, const char *name,
1588 enum dummy_netdev_conn_state state)
1589 {
1590 ds_put_format(s, "%s: ", name);
1591
1592 switch (state) {
1593 case CONN_STATE_CONNECTED:
1594 ds_put_cstr(s, "connected\n");
1595 break;
1596
1597 case CONN_STATE_NOT_CONNECTED:
1598 ds_put_cstr(s, "disconnected\n");
1599 break;
1600
1601 case CONN_STATE_UNKNOWN:
1602 default:
1603 ds_put_cstr(s, "unknown\n");
1604 break;
1605 };
1606 }
1607
1608 static void
1609 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1610 const char *argv[], void *aux OVS_UNUSED)
1611 {
1612 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1613 struct ds s;
1614
1615 ds_init(&s);
1616
1617 if (argc > 1) {
1618 const char *dev_name = argv[1];
1619 struct netdev *netdev = netdev_from_name(dev_name);
1620
1621 if (netdev && is_dummy_class(netdev->netdev_class)) {
1622 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1623
1624 ovs_mutex_lock(&dummy_dev->mutex);
1625 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1626 ovs_mutex_unlock(&dummy_dev->mutex);
1627
1628 netdev_close(netdev);
1629 }
1630 display_conn_state__(&s, dev_name, state);
1631 } else {
1632 struct netdev_dummy *netdev;
1633
1634 ovs_mutex_lock(&dummy_list_mutex);
1635 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1636 ovs_mutex_lock(&netdev->mutex);
1637 state = dummy_netdev_get_conn_state(&netdev->conn);
1638 ovs_mutex_unlock(&netdev->mutex);
1639 if (state != CONN_STATE_UNKNOWN) {
1640 display_conn_state__(&s, netdev->up.name, state);
1641 }
1642 }
1643 ovs_mutex_unlock(&dummy_list_mutex);
1644 }
1645
1646 unixctl_command_reply(conn, ds_cstr(&s));
1647 ds_destroy(&s);
1648 }
1649
1650 static void
1651 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1652 const char *argv[], void *aux OVS_UNUSED)
1653 {
1654 struct netdev *netdev = netdev_from_name(argv[1]);
1655
1656 if (netdev && is_dummy_class(netdev->netdev_class)) {
1657 struct in_addr ip, mask;
1658 char *error;
1659
1660 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1661 if (!error) {
1662 netdev_dummy_set_in4(netdev, ip, mask);
1663 unixctl_command_reply(conn, "OK");
1664 } else {
1665 unixctl_command_reply_error(conn, error);
1666 free(error);
1667 }
1668 } else {
1669 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1670 }
1671
1672 netdev_close(netdev);
1673 }
1674
1675 static void
1676 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1677 const char *argv[], void *aux OVS_UNUSED)
1678 {
1679 struct netdev *netdev = netdev_from_name(argv[1]);
1680
1681 if (netdev && is_dummy_class(netdev->netdev_class)) {
1682 struct in6_addr ip6;
1683 char *error;
1684 uint32_t plen;
1685
1686 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1687 if (!error) {
1688 struct in6_addr mask;
1689
1690 mask = ipv6_create_mask(plen);
1691 netdev_dummy_set_in6(netdev, &ip6, &mask);
1692 unixctl_command_reply(conn, "OK");
1693 } else {
1694 unixctl_command_reply_error(conn, error);
1695 free(error);
1696 }
1697 netdev_close(netdev);
1698 } else {
1699 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1700 }
1701
1702 netdev_close(netdev);
1703 }
1704
1705
1706 static void
1707 netdev_dummy_override(const char *type)
1708 {
1709 if (!netdev_unregister_provider(type)) {
1710 struct netdev_class *class;
1711 int error;
1712
1713 class = xmemdup(&dummy_class, sizeof dummy_class);
1714 class->type = xstrdup(type);
1715 error = netdev_register_provider(class);
1716 if (error) {
1717 VLOG_ERR("%s: failed to register netdev provider (%s)",
1718 type, ovs_strerror(error));
1719 free(CONST_CAST(char *, class->type));
1720 free(class);
1721 }
1722 }
1723 }
1724
/* Registers the dummy netdev classes, their unixctl commands, and the
 * vport tunnel classes.  With DUMMY_OVERRIDE_ALL every already-registered
 * netdev type except "patch" is replaced by a dummy implementation; with
 * DUMMY_OVERRIDE_SYSTEM only the "system" type is replaced. */
void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive",
                             "name [--qid queue_id] packet|flow...",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);
    unixctl_command_register("netdev-dummy/ip6addr",
                             "[netdev] ip6addr", 2, 2,
                             netdev_dummy_ip6addr, NULL);

    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            /* "patch" ports must keep their real implementation. */
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);
    netdev_register_provider(&dummy_pmd_class);

    netdev_vport_tunnel_register();
}