]> git.proxmox.com Git - mirror_ovs.git/blob - lib/netdev-dummy.c
ofp-actions: Add extension to support "group" action in OF1.0.
[mirror_ovs.git] / lib / netdev-dummy.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22 #include <unistd.h>
23
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
26 #include "flow.h"
27 #include "netdev-provider.h"
28 #include "netdev-vport.h"
29 #include "odp-util.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "openvswitch/list.h"
32 #include "openvswitch/ofp-print.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "packets.h"
37 #include "pcap-file.h"
38 #include "poll-loop.h"
39 #include "shash.h"
40 #include "sset.h"
41 #include "stream.h"
42 #include "unaligned.h"
43 #include "timeval.h"
44 #include "unixctl.h"
45 #include "reconnect.h"
46
47 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
48
struct reconnect;

/* A bidirectional packet stream carried over an OVS 'stream': bytes received
 * accumulate in 'rxbuf' until a full frame arrives; outbound frames wait on
 * 'txq' until the stream can take them. */
struct dummy_packet_stream {
    struct stream *stream;
    struct dp_packet rxbuf;     /* Partially reassembled incoming frame. */
    struct ovs_list txq;        /* Queue of "struct pkt_list_node". */
};

/* Kind of connection configured on a dummy netdev. */
enum dummy_packet_conn_type {
    NONE,                       /* No connection is configured. */
    PASSIVE,                    /* Listener. */
    ACTIVE                      /* Connect to listener. */
};

/* Externally visible connection state (meaningful for ACTIVE only). */
enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,       /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,   /* Listener not connected. */
    CONN_STATE_UNKNOWN,         /* No relevant information. */
};

/* Passive (listening) side: one dummy_packet_stream per accepted client. */
struct dummy_packet_pconn {
    struct pstream *pstream;
    struct dummy_packet_stream *streams;  /* Array of 'n_streams' entries. */
    size_t n_streams;
};

/* Active (connecting) side: a single stream driven by a reconnect FSM. */
struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};

/* Tagged union over the two connection flavors; 'type' selects 'u'. */
struct dummy_packet_conn {
    enum dummy_packet_conn_type type;
    union {
        struct dummy_packet_pconn pconn;
        struct dummy_packet_rconn rconn;
    } u;
};

/* List node that carries one packet on an ovs_list-based queue. */
struct pkt_list_node {
    struct dp_packet *pkt;
    struct ovs_list list_node;
};
92
/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dummy_dev's. */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);

/* A dummy network device, used by the testsuite in place of real hardware. */
struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    struct eth_addr hwaddr OVS_GUARDED;
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;        /* -EOPNOTSUPP when not configured. */
    int numa_id OVS_GUARDED;

    struct dummy_packet_conn conn OVS_GUARDED;

    /* Optional pcap files recording transmitted / received traffic. */
    FILE *tx_pcap, *rxq_pcap OVS_GUARDED;

    struct in_addr address, netmask;    /* Assigned IPv4 address, if any. */
    struct in6_addr ipv6, ipv6_mask;    /* Assigned IPv6 address, if any. */
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */

    /* The following properties are for dummy-pmd and they cannot be changed
     * when a device is running, so we remember the request and update them
     * next time netdev_dummy_reconfigure() is called. */
    int requested_n_txq OVS_GUARDED;
    int requested_n_rxq OVS_GUARDED;
    int requested_numa_id OVS_GUARDED;
};

/* Max 'recv_queue_len' in struct netdev_rxq_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

/* One receive queue belonging to a dummy netdev. */
struct netdev_rxq_dummy {
    struct netdev_rxq up;
    struct ovs_list node;       /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue; /* Queue of "struct pkt_list_node". */
    int recv_queue_len;         /* ovs_list_size(&recv_queue). */
    struct seq *seq;            /* Reports newly queued packets. */
};
142
/* Forward declarations. */
static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
                                      struct dp_packet *, int);

static void dummy_packet_stream_close(struct dummy_packet_stream *);

static void pkt_list_delete(struct ovs_list *);
151
/* Returns true if 'class' is the dummy netdev class (identified by its
 * construct callback). */
static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}
157
/* Returns the netdev_dummy that embeds 'netdev', which must belong to the
 * dummy class (asserted). */
static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}
164
/* Returns the netdev_rxq_dummy that embeds 'rx', whose owning netdev must
 * belong to the dummy class (asserted). */
static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}
171
172 static void
173 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
174 {
175 int rxbuf_size = stream ? 2048 : 0;
176 s->stream = stream;
177 dp_packet_init(&s->rxbuf, rxbuf_size);
178 ovs_list_init(&s->txq);
179 }
180
181 static struct dummy_packet_stream *
182 dummy_packet_stream_create(struct stream *stream)
183 {
184 struct dummy_packet_stream *s;
185
186 s = xzalloc(sizeof *s);
187 dummy_packet_stream_init(s, stream);
188
189 return s;
190 }
191
/* Registers poll-loop wakeups for 's': stream background work, pending
 * transmits (only if the tx queue is nonempty), and incoming data. */
static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}
201
/* Queues a copy of the 'size'-byte frame at 'buffer' for transmission on 's',
 * framed with a 2-byte big-endian length prefix.  Silently drops the frame
 * when the tx queue is already at NETDEV_DUMMY_MAX_QUEUE entries. */
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        /* 2 bytes of headroom leave room to push the length prefix. */
        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}
217
/* Services 's': pushes queued tx frames into the stream and reassembles
 * length-prefixed frames from received bytes, handing each complete frame to
 * 'dev''s receive path.  Returns 0 on progress or idle, a positive errno on
 * failure, or EOF when the peer closed the connection cleanly. */
static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    /* Transmit side: try to send the head-of-queue frame.  A partial send
     * just advances the buffer; the remainder goes out on a later call. */
    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    /* Receive side: compute how many bytes we still need -- first the rest
     * of the 2-byte big-endian length prefix, then the rest of the frame. */
    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                /* Anything shorter than an Ethernet header is bogus. */
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                /* Complete frame: strip the prefix, queue a copy on the
                 * device's rx path, and reset for the next frame. */
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            /* A short, nonempty rxbuf at EOF means a truncated frame. */
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}
285
/* Releases everything owned by 's': the transport stream, the partial rx
 * buffer, and any still-queued tx frames.  Does not free 's' itself. */
static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}
293
/* Initializes 'conn' as unconfigured (type NONE).  The explicit assignment
 * restates what the memset already established, since NONE == 0. */
static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}
300
/* Reports 'conn''s configuration by adding the relevant "pstream" or
 * "stream" key to 'args'; adds nothing when no connection is configured. */
static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{

    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}
319
320 static void
321 dummy_packet_conn_close(struct dummy_packet_conn *conn)
322 {
323 int i;
324 struct dummy_packet_pconn *pconn = &conn->u.pconn;
325 struct dummy_packet_rconn *rconn = &conn->u.rconn;
326
327 switch (conn->type) {
328 case PASSIVE:
329 pstream_close(pconn->pstream);
330 for (i = 0; i < pconn->n_streams; i++) {
331 dummy_packet_stream_close(&pconn->streams[i]);
332 }
333 free(pconn->streams);
334 pconn->pstream = NULL;
335 pconn->streams = NULL;
336 break;
337
338 case ACTIVE:
339 dummy_packet_stream_close(rconn->rstream);
340 free(rconn->rstream);
341 rconn->rstream = NULL;
342 reconnect_destroy(rconn->reconnect);
343 rconn->reconnect = NULL;
344 break;
345
346 case NONE:
347 default:
348 break;
349 }
350
351 conn->type = NONE;
352 memset(conn, 0, sizeof *conn);
353 }
354
/* Applies the "pstream"/"stream" keys from 'args' to 'conn'.  If the
 * requested endpoint matches the current one this is a no-op; otherwise the
 * old connection is closed and a new passive or active one is opened. */
static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    /* The two modes are mutually exclusive. */
    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    /* Keep the existing connection when the name is unchanged; otherwise
     * tear it down before reconfiguring. */
    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        /* Active mode drives the stream through a reconnect state machine
         * so a lost peer is retried with backoff. */
        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->u.rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            /* Connection in progress; dummy_rconn_run() completes it. */
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->u.rconn.rstream->stream = NULL;
            break;
        }
    }
}
433
434 static void
435 dummy_pconn_run(struct netdev_dummy *dev)
436 OVS_REQUIRES(dev->mutex)
437 {
438 struct stream *new_stream;
439 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
440 int error;
441 size_t i;
442
443 error = pstream_accept(pconn->pstream, &new_stream);
444 if (!error) {
445 struct dummy_packet_stream *s;
446
447 pconn->streams = xrealloc(pconn->streams,
448 ((pconn->n_streams + 1)
449 * sizeof *s));
450 s = &pconn->streams[pconn->n_streams++];
451 dummy_packet_stream_init(s, new_stream);
452 } else if (error != EAGAIN) {
453 VLOG_WARN("%s: accept failed (%s)",
454 pstream_get_name(pconn->pstream), ovs_strerror(error));
455 pstream_close(pconn->pstream);
456 pconn->pstream = NULL;
457 dev->conn.type = NONE;
458 }
459
460 for (i = 0; i < pconn->n_streams; i++) {
461 struct dummy_packet_stream *s = &pconn->streams[i];
462
463 error = dummy_packet_stream_run(dev, s);
464 if (error) {
465 VLOG_DBG("%s: closing connection (%s)",
466 stream_get_name(s->stream),
467 ovs_retval_to_string(error));
468 dummy_packet_stream_close(s);
469 pconn->streams[i] = pconn->streams[--pconn->n_streams];
470 }
471 }
472 }
473
/* Services 'dev''s active connection: advances the reconnect state machine
 * (opening or completing a connection as instructed) and, once connected,
 * runs the stream, disconnecting on error so the FSM retries. */
static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            /* Either finish a half-open connection or start a fresh one. */
            if (rconn->rstream->stream) {
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}
528
529 static void
530 dummy_packet_conn_run(struct netdev_dummy *dev)
531 OVS_REQUIRES(dev->mutex)
532 {
533 switch (dev->conn.type) {
534 case PASSIVE:
535 dummy_pconn_run(dev);
536 break;
537
538 case ACTIVE:
539 dummy_rconn_run(dev);
540 break;
541
542 case NONE:
543 default:
544 break;
545 }
546 }
547
/* Registers poll-loop wakeups for 'conn': the listener and every accepted
 * stream in passive mode, or the single stream (when connected) in active
 * mode. */
static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;
    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->u.pconn.pstream);
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;
    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
571
/* Queues the 'size'-byte frame at 'buffer' on every peer of 'conn': each
 * accepted client in passive mode, or the single peer (if connected) in
 * active mode.  Also registers the corresponding poll wakeups so the queued
 * data is flushed on the next run. */
static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            /* NOTE(review): registering the listener wait here per stream
             * looks loop-invariant; presumably intentional so it is only
             * armed when at least one client exists -- confirm. */
            pstream_wait(conn->u.pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
600
601 static enum dummy_netdev_conn_state
602 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
603 {
604 enum dummy_netdev_conn_state state;
605
606 if (conn->type == ACTIVE) {
607 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
608 state = CONN_STATE_CONNECTED;
609 } else {
610 state = CONN_STATE_NOT_CONNECTED;
611 }
612 } else {
613 state = CONN_STATE_UNKNOWN;
614 }
615
616 return state;
617 }
618
/* Global "run" hook for the dummy class: services the connection of every
 * dummy device, taking each device's mutex under the list mutex (the
 * documented lock order). */
static void
netdev_dummy_run(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
632
/* Global "wait" hook for the dummy class: registers poll wakeups for every
 * dummy device's connection, mirroring netdev_dummy_run(). */
static void
netdev_dummy_wait(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
646
647 static struct netdev *
648 netdev_dummy_alloc(void)
649 {
650 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
651 return &netdev->up;
652 }
653
/* netdev_class "construct" callback: gives the device a unique locally
 * administered MAC (aa:55:<counter>), default MTU and flags, and links it
 * into the global device list.  Always returns 0. */
static int
netdev_dummy_construct(struct netdev *netdev_)
{
    /* Monotonic counter that makes each device's MAC unique. */
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->ifindex = -EOPNOTSUPP;      /* "no ifindex configured". */
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;
    netdev->numa_id = 0;

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}
689
690 static void
691 netdev_dummy_destruct(struct netdev *netdev_)
692 {
693 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
694
695 ovs_mutex_lock(&dummy_list_mutex);
696 ovs_list_remove(&netdev->list_node);
697 ovs_mutex_unlock(&dummy_list_mutex);
698
699 ovs_mutex_lock(&netdev->mutex);
700 dummy_packet_conn_close(&netdev->conn);
701 netdev->conn.type = NONE;
702
703 ovs_mutex_unlock(&netdev->mutex);
704 ovs_mutex_destroy(&netdev->mutex);
705 }
706
/* netdev_class "dealloc" callback: frees the storage obtained by
 * netdev_dummy_alloc(). */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    free(netdev_dummy_cast(netdev_));
}
714
/* netdev_class "get_config" callback: reports ifindex (if set), connection
 * configuration, and -- for dummy-pmd only -- the requested and configured
 * queue counts.  Always returns 0. */
static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    /* A negative ifindex encodes "not configured" (-EOPNOTSUPP). */
    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
741
/* netdev_class "get_addr_list" callback: returns the device's assigned IPv4
 * (as an IPv4-mapped IPv6 address) and/or IPv6 address with masks in
 * malloc'd arrays via '*paddr'/'*pmask', with the count in '*n_addr'.
 * Returns EADDRNOTAVAIL when no address is assigned.  If 'paddr' is NULL the
 * arrays are freed instead of returned. */
static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    /* Count assigned addresses first so the arrays can be sized exactly. */
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}
788
789 static int
790 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
791 struct in_addr netmask)
792 {
793 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
794
795 ovs_mutex_lock(&netdev->mutex);
796 netdev->address = address;
797 netdev->netmask = netmask;
798 netdev_change_seq_changed(netdev_);
799 ovs_mutex_unlock(&netdev->mutex);
800
801 return 0;
802 }
803
804 static int
805 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
806 struct in6_addr *mask)
807 {
808 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
809
810 ovs_mutex_lock(&netdev->mutex);
811 netdev->ipv6 = *in6;
812 netdev->ipv6_mask = *mask;
813 netdev_change_seq_changed(netdev_);
814 ovs_mutex_unlock(&netdev->mutex);
815
816 return 0;
817 }
818
/* netdev_class "set_config" callback: applies "ifindex", connection
 * ("pstream"/"stream"), pcap capture, and -- for dummy-pmd -- queue/NUMA
 * settings from 'args'.  Always returns 0. */
static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;
    int new_n_rxq, new_n_txq, new_numa_id;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    /* Close any previously configured pcap files; the two pointers alias
     * when a single "pcap" captured both directions, so close only once. */
    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        /* "pcap" records both directions into one file. */
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    netdev_change_seq_changed(netdev_);

    /* 'dummy-pmd' specific config. */
    if (!netdev_->netdev_class->is_pmd) {
        goto exit;
    }

    /* Queue counts are clamped to at least 1; changes take effect only via
     * netdev_dummy_reconfigure(), so just record the request here. */
    new_n_rxq = MAX(smap_get_int(args, "n_rxq", netdev->requested_n_rxq), 1);
    new_n_txq = MAX(smap_get_int(args, "n_txq", netdev->requested_n_txq), 1);
    new_numa_id = smap_get_int(args, "numa_id", 0);
    if (new_n_rxq != netdev->requested_n_rxq
        || new_n_txq != netdev->requested_n_txq
        || new_numa_id != netdev->requested_numa_id) {
        netdev->requested_n_rxq = new_n_rxq;
        netdev->requested_n_txq = new_n_txq;
        netdev->requested_numa_id = new_numa_id;
        netdev_request_reconfigure(netdev_);
    }

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
876
877 static int
878 netdev_dummy_get_numa_id(const struct netdev *netdev_)
879 {
880 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
881
882 ovs_mutex_lock(&netdev->mutex);
883 int numa_id = netdev->numa_id;
884 ovs_mutex_unlock(&netdev->mutex);
885
886 return numa_id;
887 }
888
/* Sets the number of tx queues and rx queues for the dummy PMD interface. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    /* Apply the values recorded by netdev_dummy_set_config(); this callback
     * runs only while the device is safely stopped. */
    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;
    netdev->numa_id = netdev->requested_numa_id;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
904
905 static struct netdev_rxq *
906 netdev_dummy_rxq_alloc(void)
907 {
908 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
909 return &rx->up;
910 }
911
/* netdev_class "rxq_construct" callback: links the queue onto its device's
 * 'rxes' list and initializes its packet queue and change sequence.  Always
 * returns 0. */
static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
927
/* netdev_class "rxq_destruct" callback: unlinks the queue from its device,
 * frees any undelivered packets, and destroys its change sequence. */
static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    /* seq_destroy() outside the mutex: the queue is already unreachable. */
    seq_destroy(rx->seq);
}
940
/* netdev_class "rxq_dealloc" callback: frees the storage obtained by
 * netdev_dummy_rxq_alloc(). */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    free(netdev_rxq_dummy_cast(rxq_));
}
948
/* netdev_class "rxq_recv" callback: pops one queued packet into 'arr[0]'
 * (setting '*c' to 1), updating rx stats.  Returns EAGAIN when the queue is
 * empty. */
static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
                      int *c)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite
             * - To give valgrind a chance to switch thread. According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple thread from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    /* Re-acquire the mutex just for the stats update; the packet itself is
     * already owned by this thread. */
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&netdev->mutex);

    /* Pad short frames up to the Ethernet minimum before delivery. */
    dp_packet_pad(packet);

    arr[0] = packet;
    *c = 1;
    return 0;
}
998
/* netdev_class "rxq_wait" callback: wakes immediately if packets are already
 * queued, otherwise sleeps on the queue's seq until a new packet arrives. */
static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    /* Read the seq before checking the queue so a packet queued in between
     * still wakes us via seq_wait(). */
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}
1014
/* netdev_class "rxq_drain" callback: discards all queued packets and bumps
 * the seq so any waiters re-evaluate.  Always returns 0. */
static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}
1030
/* netdev_class "send" callback: validates each packet's size against the
 * device MTU, delivers it over the configured connection, answers ARP
 * requests for the device's own IPv4 address, and records it to the tx pcap
 * if configured.  Returns 0 on success or EMSGSIZE on the first over/under
 * sized packet (remaining packets are not sent).  When 'may_steal' is true,
 * all packets are freed regardless of errors. */
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;
    int i;

    for (i = 0; i < cnt; i++) {
        const void *buffer = dp_packet_data(pkts[i]);
        size_t size = dp_packet_size(pkts[i]);

        /* Honor any cut length requested by the datapath. */
        size -= dp_packet_get_cutlen(pkts[i]);

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            /* A single VLAN tag may legitimately exceed the MTU budget. */
            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet packet;
            struct flow flow;

            dp_packet_use_const(&packet, buffer, size);
            flow_extract(&packet, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet packet;

            dp_packet_use_const(&packet, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &packet);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    /* Ownership transferred to us: free every packet, even unsent ones. */
    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }

    return error;
}
1107
1108 static int
1109 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1110 {
1111 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1112
1113 ovs_mutex_lock(&dev->mutex);
1114 if (!eth_addr_equals(dev->hwaddr, mac)) {
1115 dev->hwaddr = mac;
1116 netdev_change_seq_changed(netdev);
1117 }
1118 ovs_mutex_unlock(&dev->mutex);
1119
1120 return 0;
1121 }
1122
1123 static int
1124 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1125 {
1126 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1127
1128 ovs_mutex_lock(&dev->mutex);
1129 *mac = dev->hwaddr;
1130 ovs_mutex_unlock(&dev->mutex);
1131
1132 return 0;
1133 }
1134
1135 static int
1136 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1137 {
1138 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1139
1140 ovs_mutex_lock(&dev->mutex);
1141 *mtup = dev->mtu;
1142 ovs_mutex_unlock(&dev->mutex);
1143
1144 return 0;
1145 }
1146
1147 static int
1148 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
1149 {
1150 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1151
1152 ovs_mutex_lock(&dev->mutex);
1153 dev->mtu = mtu;
1154 ovs_mutex_unlock(&dev->mutex);
1155
1156 return 0;
1157 }
1158
1159 static int
1160 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1161 {
1162 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1163
1164 ovs_mutex_lock(&dev->mutex);
1165 /* Passing only collected counters */
1166 stats->tx_packets = dev->stats.tx_packets;
1167 stats->tx_bytes = dev->stats.tx_bytes;
1168 stats->rx_packets = dev->stats.rx_packets;
1169 stats->rx_bytes = dev->stats.rx_bytes;
1170 ovs_mutex_unlock(&dev->mutex);
1171
1172 return 0;
1173 }
1174
1175 static int
1176 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1177 unsigned int queue_id, struct smap *details OVS_UNUSED)
1178 {
1179 if (queue_id == 0) {
1180 return 0;
1181 } else {
1182 return EINVAL;
1183 }
1184 }
1185
1186 static void
1187 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1188 {
1189 *stats = (struct netdev_queue_stats) {
1190 .tx_bytes = UINT64_MAX,
1191 .tx_packets = UINT64_MAX,
1192 .tx_errors = UINT64_MAX,
1193 .created = LLONG_MIN,
1194 };
1195 }
1196
1197 static int
1198 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1199 unsigned int queue_id,
1200 struct netdev_queue_stats *stats)
1201 {
1202 if (queue_id == 0) {
1203 netdev_dummy_init_queue_stats(stats);
1204 return 0;
1205 } else {
1206 return EINVAL;
1207 }
1208 }
1209
/* Cursor for the queue dump: the single queue is id 0, so 'next_queue'
 * simply tracks whether it has been reported yet. */
struct netdev_dummy_queue_state {
    unsigned int next_queue;
};
1213
1214 static int
1215 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1216 void **statep)
1217 {
1218 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1219 state->next_queue = 0;
1220 *statep = state;
1221 return 0;
1222 }
1223
1224 static int
1225 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1226 void *state_,
1227 unsigned int *queue_id,
1228 struct smap *details OVS_UNUSED)
1229 {
1230 struct netdev_dummy_queue_state *state = state_;
1231 if (state->next_queue == 0) {
1232 *queue_id = 0;
1233 state->next_queue++;
1234 return 0;
1235 } else {
1236 return EOF;
1237 }
1238 }
1239
1240 static int
1241 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1242 void *state)
1243 {
1244 free(state);
1245 return 0;
1246 }
1247
1248 static int
1249 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1250 void (*cb)(unsigned int queue_id,
1251 struct netdev_queue_stats *,
1252 void *aux),
1253 void *aux)
1254 {
1255 struct netdev_queue_stats stats;
1256 netdev_dummy_init_queue_stats(&stats);
1257 cb(0, &stats, aux);
1258 return 0;
1259 }
1260
1261 static int
1262 netdev_dummy_get_ifindex(const struct netdev *netdev)
1263 {
1264 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1265 int ifindex;
1266
1267 ovs_mutex_lock(&dev->mutex);
1268 ifindex = dev->ifindex;
1269 ovs_mutex_unlock(&dev->mutex);
1270
1271 return ifindex;
1272 }
1273
1274 static int
1275 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1276 enum netdev_flags off, enum netdev_flags on,
1277 enum netdev_flags *old_flagsp)
1278 OVS_REQUIRES(netdev->mutex)
1279 {
1280 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1281 return EINVAL;
1282 }
1283
1284 *old_flagsp = netdev->flags;
1285 netdev->flags |= on;
1286 netdev->flags &= ~off;
1287 if (*old_flagsp != netdev->flags) {
1288 netdev_change_seq_changed(&netdev->up);
1289 }
1290
1291 return 0;
1292 }
1293
1294 static int
1295 netdev_dummy_update_flags(struct netdev *netdev_,
1296 enum netdev_flags off, enum netdev_flags on,
1297 enum netdev_flags *old_flagsp)
1298 {
1299 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1300 int error;
1301
1302 ovs_mutex_lock(&netdev->mutex);
1303 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1304 ovs_mutex_unlock(&netdev->mutex);
1305
1306 return error;
1307 }
1308 \f
1309 /* Helper functions. */
1310
/* Expands to an initializer for a struct netdev_class describing a dummy
 * device class:
 *
 *   NAME        - the class's type string, e.g. "dummy".
 *   PMD         - true if netdevs of this class are polled by PMD threads.
 *   RECONFIGURE - the class's reconfigure callback, or NULL if none.
 *
 * (Fixes the previous misspelling "RECOFIGURE"; the parameter is positional,
 * so callers are unaffected.) */
#define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE) \
{                                               \
    NAME,                                       \
    PMD,                        /* is_pmd */    \
    NULL,                       /* init */      \
    netdev_dummy_run,                           \
    netdev_dummy_wait,                          \
                                                \
    netdev_dummy_alloc,                         \
    netdev_dummy_construct,                     \
    netdev_dummy_destruct,                      \
    netdev_dummy_dealloc,                       \
    netdev_dummy_get_config,                    \
    netdev_dummy_set_config,                    \
    NULL,                       /* get_tunnel_config */ \
    NULL,                       /* build header */      \
    NULL,                       /* push header */       \
    NULL,                       /* pop header */        \
    netdev_dummy_get_numa_id,                   \
    NULL,                       /* set_tx_multiq */     \
                                                \
    netdev_dummy_send,          /* send */      \
    NULL,                       /* send_wait */ \
                                                \
    netdev_dummy_set_etheraddr,                 \
    netdev_dummy_get_etheraddr,                 \
    netdev_dummy_get_mtu,                       \
    netdev_dummy_set_mtu,                       \
    netdev_dummy_get_ifindex,                   \
    NULL,                       /* get_carrier */       \
    NULL,                       /* get_carrier_resets */ \
    NULL,                       /* get_miimon */        \
    netdev_dummy_get_stats,                     \
                                                \
    NULL,                       /* get_features */      \
    NULL,                       /* set_advertisements */ \
                                                \
    NULL,                       /* set_policing */      \
    NULL,                       /* get_qos_types */     \
    NULL,                       /* get_qos_capabilities */ \
    NULL,                       /* get_qos */   \
    NULL,                       /* set_qos */   \
    netdev_dummy_get_queue,                     \
    NULL,                       /* set_queue */ \
    NULL,                       /* delete_queue */      \
    netdev_dummy_get_queue_stats,               \
    netdev_dummy_queue_dump_start,              \
    netdev_dummy_queue_dump_next,               \
    netdev_dummy_queue_dump_done,               \
    netdev_dummy_dump_queue_stats,              \
                                                \
    NULL,                       /* set_in4 */   \
    netdev_dummy_get_addr_list,                 \
    NULL,                       /* add_router */        \
    NULL,                       /* get_next_hop */      \
    NULL,                       /* get_status */        \
    NULL,                       /* arp_lookup */        \
                                                \
    netdev_dummy_update_flags,                  \
    RECONFIGURE,                                \
                                                \
    netdev_dummy_rxq_alloc,                     \
    netdev_dummy_rxq_construct,                 \
    netdev_dummy_rxq_destruct,                  \
    netdev_dummy_rxq_dealloc,                   \
    netdev_dummy_rxq_recv,                      \
    netdev_dummy_rxq_wait,                      \
    netdev_dummy_rxq_drain,                     \
}
1380
/* The plain dummy class, plus a PMD variant that is polled by PMD threads
 * and supports runtime reconfiguration. */
static const struct netdev_class dummy_class =
    NETDEV_DUMMY_CLASS("dummy", false, NULL);

static const struct netdev_class dummy_pmd_class =
    NETDEV_DUMMY_CLASS("dummy-pmd", true,
                       netdev_dummy_reconfigure);
1387
1388 static void
1389 pkt_list_delete(struct ovs_list *l)
1390 {
1391 struct pkt_list_node *pkt;
1392
1393 LIST_FOR_EACH_POP(pkt, list_node, l) {
1394 dp_packet_delete(pkt->pkt);
1395 free(pkt);
1396 }
1397 }
1398
1399 static struct dp_packet *
1400 eth_from_packet_or_flow(const char *s)
1401 {
1402 enum odp_key_fitness fitness;
1403 struct dp_packet *packet;
1404 struct ofpbuf odp_key;
1405 struct flow flow;
1406 int error;
1407
1408 if (!eth_from_hex(s, &packet)) {
1409 return packet;
1410 }
1411
1412 /* Convert string to datapath key.
1413 *
1414 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1415 * the code for that currently calls exit() on parse error. We have to
1416 * settle for parsing a datapath key for now.
1417 */
1418 ofpbuf_init(&odp_key, 0);
1419 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1420 if (error) {
1421 ofpbuf_uninit(&odp_key);
1422 return NULL;
1423 }
1424
1425 /* Convert odp_key to flow. */
1426 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1427 if (fitness == ODP_FIT_ERROR) {
1428 ofpbuf_uninit(&odp_key);
1429 return NULL;
1430 }
1431
1432 packet = dp_packet_new(0);
1433 flow_compose(packet, &flow);
1434
1435 ofpbuf_uninit(&odp_key);
1436 return packet;
1437 }
1438
1439 static void
1440 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1441 {
1442 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1443
1444 pkt_node->pkt = packet;
1445 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1446 rx->recv_queue_len++;
1447 seq_change(rx->seq);
1448 }
1449
1450 static void
1451 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
1452 int queue_id)
1453 OVS_REQUIRES(dummy->mutex)
1454 {
1455 struct netdev_rxq_dummy *rx, *prev;
1456
1457 if (dummy->rxq_pcap) {
1458 ovs_pcap_write(dummy->rxq_pcap, packet);
1459 fflush(dummy->rxq_pcap);
1460 }
1461 prev = NULL;
1462 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1463 if (rx->up.queue_id == queue_id &&
1464 rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1465 if (prev) {
1466 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1467 }
1468 prev = rx;
1469 }
1470 }
1471 if (prev) {
1472 netdev_dummy_queue_packet__(prev, packet);
1473 } else {
1474 dp_packet_delete(packet);
1475 }
1476 }
1477
1478 static void
1479 netdev_dummy_receive(struct unixctl_conn *conn,
1480 int argc, const char *argv[], void *aux OVS_UNUSED)
1481 {
1482 struct netdev_dummy *dummy_dev;
1483 struct netdev *netdev;
1484 int i, k = 1, rx_qid = 0;
1485
1486 netdev = netdev_from_name(argv[k++]);
1487 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1488 unixctl_command_reply_error(conn, "no such dummy netdev");
1489 goto exit_netdev;
1490 }
1491 dummy_dev = netdev_dummy_cast(netdev);
1492
1493 ovs_mutex_lock(&dummy_dev->mutex);
1494
1495 if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
1496 rx_qid = strtol(argv[k + 1], NULL, 10);
1497 if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
1498 unixctl_command_reply_error(conn, "bad rx queue id.");
1499 goto exit;
1500 }
1501 k += 2;
1502 }
1503
1504 for (i = k; i < argc; i++) {
1505 struct dp_packet *packet;
1506
1507 packet = eth_from_packet_or_flow(argv[i]);
1508 if (!packet) {
1509 unixctl_command_reply_error(conn, "bad packet syntax");
1510 goto exit;
1511 }
1512
1513 netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
1514 }
1515
1516 unixctl_command_reply(conn, NULL);
1517
1518 exit:
1519 ovs_mutex_unlock(&dummy_dev->mutex);
1520 exit_netdev:
1521 netdev_close(netdev);
1522 }
1523
1524 static void
1525 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1526 OVS_REQUIRES(dev->mutex)
1527 {
1528 enum netdev_flags old_flags;
1529
1530 if (admin_state) {
1531 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1532 } else {
1533 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1534 }
1535 }
1536
1537 static void
1538 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1539 const char *argv[], void *aux OVS_UNUSED)
1540 {
1541 bool up;
1542
1543 if (!strcasecmp(argv[argc - 1], "up")) {
1544 up = true;
1545 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1546 up = false;
1547 } else {
1548 unixctl_command_reply_error(conn, "Invalid Admin State");
1549 return;
1550 }
1551
1552 if (argc > 2) {
1553 struct netdev *netdev = netdev_from_name(argv[1]);
1554 if (netdev && is_dummy_class(netdev->netdev_class)) {
1555 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1556
1557 ovs_mutex_lock(&dummy_dev->mutex);
1558 netdev_dummy_set_admin_state__(dummy_dev, up);
1559 ovs_mutex_unlock(&dummy_dev->mutex);
1560
1561 netdev_close(netdev);
1562 } else {
1563 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1564 netdev_close(netdev);
1565 return;
1566 }
1567 } else {
1568 struct netdev_dummy *netdev;
1569
1570 ovs_mutex_lock(&dummy_list_mutex);
1571 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1572 ovs_mutex_lock(&netdev->mutex);
1573 netdev_dummy_set_admin_state__(netdev, up);
1574 ovs_mutex_unlock(&netdev->mutex);
1575 }
1576 ovs_mutex_unlock(&dummy_list_mutex);
1577 }
1578 unixctl_command_reply(conn, "OK");
1579 }
1580
1581 static void
1582 display_conn_state__(struct ds *s, const char *name,
1583 enum dummy_netdev_conn_state state)
1584 {
1585 ds_put_format(s, "%s: ", name);
1586
1587 switch (state) {
1588 case CONN_STATE_CONNECTED:
1589 ds_put_cstr(s, "connected\n");
1590 break;
1591
1592 case CONN_STATE_NOT_CONNECTED:
1593 ds_put_cstr(s, "disconnected\n");
1594 break;
1595
1596 case CONN_STATE_UNKNOWN:
1597 default:
1598 ds_put_cstr(s, "unknown\n");
1599 break;
1600 };
1601 }
1602
1603 static void
1604 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1605 const char *argv[], void *aux OVS_UNUSED)
1606 {
1607 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1608 struct ds s;
1609
1610 ds_init(&s);
1611
1612 if (argc > 1) {
1613 const char *dev_name = argv[1];
1614 struct netdev *netdev = netdev_from_name(dev_name);
1615
1616 if (netdev && is_dummy_class(netdev->netdev_class)) {
1617 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1618
1619 ovs_mutex_lock(&dummy_dev->mutex);
1620 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1621 ovs_mutex_unlock(&dummy_dev->mutex);
1622
1623 netdev_close(netdev);
1624 }
1625 display_conn_state__(&s, dev_name, state);
1626 } else {
1627 struct netdev_dummy *netdev;
1628
1629 ovs_mutex_lock(&dummy_list_mutex);
1630 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1631 ovs_mutex_lock(&netdev->mutex);
1632 state = dummy_netdev_get_conn_state(&netdev->conn);
1633 ovs_mutex_unlock(&netdev->mutex);
1634 if (state != CONN_STATE_UNKNOWN) {
1635 display_conn_state__(&s, netdev->up.name, state);
1636 }
1637 }
1638 ovs_mutex_unlock(&dummy_list_mutex);
1639 }
1640
1641 unixctl_command_reply(conn, ds_cstr(&s));
1642 ds_destroy(&s);
1643 }
1644
1645 static void
1646 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1647 const char *argv[], void *aux OVS_UNUSED)
1648 {
1649 struct netdev *netdev = netdev_from_name(argv[1]);
1650
1651 if (netdev && is_dummy_class(netdev->netdev_class)) {
1652 struct in_addr ip, mask;
1653 char *error;
1654
1655 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1656 if (!error) {
1657 netdev_dummy_set_in4(netdev, ip, mask);
1658 unixctl_command_reply(conn, "OK");
1659 } else {
1660 unixctl_command_reply_error(conn, error);
1661 free(error);
1662 }
1663 } else {
1664 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1665 }
1666
1667 netdev_close(netdev);
1668 }
1669
1670 static void
1671 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1672 const char *argv[], void *aux OVS_UNUSED)
1673 {
1674 struct netdev *netdev = netdev_from_name(argv[1]);
1675
1676 if (netdev && is_dummy_class(netdev->netdev_class)) {
1677 struct in6_addr ip6;
1678 char *error;
1679 uint32_t plen;
1680
1681 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1682 if (!error) {
1683 struct in6_addr mask;
1684
1685 mask = ipv6_create_mask(plen);
1686 netdev_dummy_set_in6(netdev, &ip6, &mask);
1687 unixctl_command_reply(conn, "OK");
1688 } else {
1689 unixctl_command_reply_error(conn, error);
1690 free(error);
1691 }
1692 netdev_close(netdev);
1693 } else {
1694 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1695 }
1696
1697 netdev_close(netdev);
1698 }
1699
1700
1701 static void
1702 netdev_dummy_override(const char *type)
1703 {
1704 if (!netdev_unregister_provider(type)) {
1705 struct netdev_class *class;
1706 int error;
1707
1708 class = xmemdup(&dummy_class, sizeof dummy_class);
1709 class->type = xstrdup(type);
1710 error = netdev_register_provider(class);
1711 if (error) {
1712 VLOG_ERR("%s: failed to register netdev provider (%s)",
1713 type, ovs_strerror(error));
1714 free(CONST_CAST(char *, class->type));
1715 free(class);
1716 }
1717 }
1718 }
1719
1720 void
1721 netdev_dummy_register(enum dummy_level level)
1722 {
1723 unixctl_command_register("netdev-dummy/receive",
1724 "name [--qid queue_id] packet|flow...",
1725 2, INT_MAX, netdev_dummy_receive, NULL);
1726 unixctl_command_register("netdev-dummy/set-admin-state",
1727 "[netdev] up|down", 1, 2,
1728 netdev_dummy_set_admin_state, NULL);
1729 unixctl_command_register("netdev-dummy/conn-state",
1730 "[netdev]", 0, 1,
1731 netdev_dummy_conn_state, NULL);
1732 unixctl_command_register("netdev-dummy/ip4addr",
1733 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1734 netdev_dummy_ip4addr, NULL);
1735 unixctl_command_register("netdev-dummy/ip6addr",
1736 "[netdev] ip6addr", 2, 2,
1737 netdev_dummy_ip6addr, NULL);
1738
1739 if (level == DUMMY_OVERRIDE_ALL) {
1740 struct sset types;
1741 const char *type;
1742
1743 sset_init(&types);
1744 netdev_enumerate_types(&types);
1745 SSET_FOR_EACH (type, &types) {
1746 if (strcmp(type, "patch")) {
1747 netdev_dummy_override(type);
1748 }
1749 }
1750 sset_destroy(&types);
1751 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1752 netdev_dummy_override("system");
1753 }
1754 netdev_register_provider(&dummy_class);
1755 netdev_register_provider(&dummy_pmd_class);
1756
1757 netdev_vport_tunnel_register();
1758 }