netdev: Adding a new netdev API to be used for offloading flows
[mirror_ovs.git] / lib / netdev-dummy.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22 #include <unistd.h>
23
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
26 #include "flow.h"
27 #include "netdev-provider.h"
28 #include "netdev-vport.h"
29 #include "odp-util.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "openvswitch/list.h"
32 #include "openvswitch/ofp-print.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "packets.h"
37 #include "pcap-file.h"
38 #include "poll-loop.h"
39 #include "openvswitch/shash.h"
40 #include "sset.h"
41 #include "stream.h"
42 #include "unaligned.h"
43 #include "timeval.h"
44 #include "unixctl.h"
45 #include "reconnect.h"
46
47 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
48
49 struct reconnect;
50
51 struct dummy_packet_stream {
52 struct stream *stream; /* Active stream connection. */
53 struct dp_packet rxbuf; /* Receive buffer: be16 length, then frame. */
54 struct ovs_list txq; /* Contains "struct pkt_list_node"s to send. */
55 };
56
57 enum dummy_packet_conn_type {
58 NONE, /* No connection is configured. */
59 PASSIVE, /* Listener. */
60 ACTIVE /* Connect to listener. */
61 };
62
63 enum dummy_netdev_conn_state {
64 CONN_STATE_CONNECTED, /* Active stream connected. */
65 CONN_STATE_NOT_CONNECTED, /* Active stream not connected. */
66 CONN_STATE_UNKNOWN, /* No relevant information. */
67 };
68
69 struct dummy_packet_pconn {
70 struct pstream *pstream;
71 struct dummy_packet_stream **streams;
72 size_t n_streams;
73 };
74
75 struct dummy_packet_rconn {
76 struct dummy_packet_stream *rstream;
77 struct reconnect *reconnect;
78 };
79
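/* A dummy connection is a tagged union: 'type' selects which member of 'u'
 * is valid ('pconn' for a PASSIVE listener, 'rconn' for an ACTIVE peer);
 * with NONE, neither member is in use. */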
80 struct dummy_packet_conn {
81 enum dummy_packet_conn_type type;
82 union {
83 struct dummy_packet_pconn pconn;
84 struct dummy_packet_rconn rconn;
85 } u;
86 };
87
88 struct pkt_list_node {
89 struct dp_packet *pkt;
90 struct ovs_list list_node;
91 };
92
93 /* Protects 'dummy_list'. */
94 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
95
96 /* Contains all 'struct dummy_dev's. */
97 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
98 = OVS_LIST_INITIALIZER(&dummy_list);
99
100 struct netdev_dummy {
101 struct netdev up;
102
103 /* In dummy_list. */
104 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
105
106 /* Protects all members below. */
107 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
108
109 struct eth_addr hwaddr OVS_GUARDED;
110 int mtu OVS_GUARDED;
111 struct netdev_stats stats OVS_GUARDED;
112 enum netdev_flags flags OVS_GUARDED;
113 int ifindex OVS_GUARDED;
114 int numa_id OVS_GUARDED;
115
116 struct dummy_packet_conn conn OVS_GUARDED;
117
118 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
119
120 struct in_addr address, netmask;
121 struct in6_addr ipv6, ipv6_mask;
122 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
123
124 /* The following properties are for dummy-pmd and they cannot be changed
125 * when a device is running, so we remember the request and update them
126 * next time netdev_dummy_reconfigure() is called. */
127 int requested_n_txq OVS_GUARDED;
128 int requested_n_rxq OVS_GUARDED;
129 int requested_numa_id OVS_GUARDED;
130 };
131
132 /* Max 'recv_queue_len' in struct netdev_rxq_dummy. */
133 #define NETDEV_DUMMY_MAX_QUEUE 100
134
135 struct netdev_rxq_dummy {
136 struct netdev_rxq up;
137 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
138 struct ovs_list recv_queue;
139 int recv_queue_len; /* ovs_list_size(&recv_queue). */
140 struct seq *seq; /* Reports newly queued packets. */
141 };
142
143 static unixctl_cb_func netdev_dummy_set_admin_state;
144 static int netdev_dummy_construct(struct netdev *);
145 static void netdev_dummy_queue_packet(struct netdev_dummy *,
146 struct dp_packet *, int);
147
148 static void dummy_packet_stream_close(struct dummy_packet_stream *);
149
150 static void pkt_list_delete(struct ovs_list *);
151
152 static bool
153 is_dummy_class(const struct netdev_class *class)
154 {
155 return class->construct == netdev_dummy_construct;
156 }
157
158 static struct netdev_dummy *
159 netdev_dummy_cast(const struct netdev *netdev)
160 {
161 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
162 return CONTAINER_OF(netdev, struct netdev_dummy, up);
163 }
164
165 static struct netdev_rxq_dummy *
166 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
167 {
168 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
169 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
170 }
171
172 static void
173 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
174 {
175 int rxbuf_size = stream ? 2048 : 0;
176 s->stream = stream;
177 dp_packet_init(&s->rxbuf, rxbuf_size);
178 ovs_list_init(&s->txq);
179 }
180
181 static struct dummy_packet_stream *
182 dummy_packet_stream_create(struct stream *stream)
183 {
184 struct dummy_packet_stream *s;
185
186 s = xzalloc(sizeof *s);
187 dummy_packet_stream_init(s, stream);
188
189 return s;
190 }
191
192 static void
193 dummy_packet_stream_wait(struct dummy_packet_stream *s)
194 {
195 stream_run_wait(s->stream);
196 if (!ovs_list_is_empty(&s->txq)) {
197 stream_send_wait(s->stream);
198 }
199 stream_recv_wait(s->stream);
200 }
201
202 static void
203 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
204 {
205 if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
206 struct dp_packet *b;
207 struct pkt_list_node *node;
208
209 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
210 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
211
212 node = xmalloc(sizeof *node);
213 node->pkt = b;
214 ovs_list_push_back(&s->txq, &node->list_node);
215 }
216 }
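/* Editor's note (a sketch inferred from the two bytes pushed above and
 * pulled again in dummy_packet_stream_run() below): frames cross the stream
 * with a 16-bit big-endian length prefix, i.e. a frame of N bytes travels as
 *
 *     +-------------+------------------------+
 *     |  N (be16)   |  N bytes of frame data |
 *     +-------------+------------------------+
 *
 * so a 60-byte frame, for example, is preceded by the bytes 0x00 0x3c. */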
217
218 static int
219 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
220 {
221 int error = 0;
222 size_t n;
223
224 stream_run(s->stream);
225
226 if (!ovs_list_is_empty(&s->txq)) {
227 struct pkt_list_node *txbuf_node;
228 struct dp_packet *txbuf;
229 int retval;
230
231 ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
232 txbuf = txbuf_node->pkt;
233 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
234
235 if (retval > 0) {
236 dp_packet_pull(txbuf, retval);
237 if (!dp_packet_size(txbuf)) {
238 ovs_list_remove(&txbuf_node->list_node);
239 free(txbuf_node);
240 dp_packet_delete(txbuf);
241 }
242 } else if (retval != -EAGAIN) {
243 error = -retval;
244 }
245 }
246
247 if (!error) {
248 if (dp_packet_size(&s->rxbuf) < 2) {
249 n = 2 - dp_packet_size(&s->rxbuf);
250 } else {
251 uint16_t frame_len;
252
253 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
254 if (frame_len < ETH_HEADER_LEN) {
255 error = EPROTO;
256 n = 0;
257 } else {
258 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
259 }
260 }
261 }
262 if (!error) {
263 int retval;
264
265 dp_packet_prealloc_tailroom(&s->rxbuf, n);
266 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
267
268 if (retval > 0) {
269 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
270 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
271 dp_packet_pull(&s->rxbuf, 2);
272 netdev_dummy_queue_packet(dev,
273 dp_packet_clone(&s->rxbuf), 0);
274 dp_packet_clear(&s->rxbuf);
275 }
276 } else if (retval != -EAGAIN) {
277 error = (retval < 0 ? -retval
278 : dp_packet_size(&s->rxbuf) ? EPROTO
279 : EOF);
280 }
281 }
282
283 return error;
284 }
285
286 static void
287 dummy_packet_stream_close(struct dummy_packet_stream *s)
288 {
289 stream_close(s->stream);
290 dp_packet_uninit(&s->rxbuf);
291 pkt_list_delete(&s->txq);
292 }
293
294 static void
295 dummy_packet_conn_init(struct dummy_packet_conn *conn)
296 {
297 memset(conn, 0, sizeof *conn);
298 conn->type = NONE;
299 }
300
301 static void
302 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
303 {
304
305 switch (conn->type) {
306 case PASSIVE:
307 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
308 break;
309
310 case ACTIVE:
311 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
312 break;
313
314 case NONE:
315 default:
316 break;
317 }
318 }
319
320 static void
321 dummy_packet_conn_close(struct dummy_packet_conn *conn)
322 {
323 int i;
324 struct dummy_packet_pconn *pconn = &conn->u.pconn;
325 struct dummy_packet_rconn *rconn = &conn->u.rconn;
326
327 switch (conn->type) {
328 case PASSIVE:
329 pstream_close(pconn->pstream);
330 for (i = 0; i < pconn->n_streams; i++) {
331 dummy_packet_stream_close(pconn->streams[i]);
332 free(pconn->streams[i]);
333 }
334 free(pconn->streams);
335 pconn->pstream = NULL;
336 pconn->streams = NULL;
337 break;
338
339 case ACTIVE:
340 dummy_packet_stream_close(rconn->rstream);
341 free(rconn->rstream);
342 rconn->rstream = NULL;
343 reconnect_destroy(rconn->reconnect);
344 rconn->reconnect = NULL;
345 break;
346
347 case NONE:
348 default:
349 break;
350 }
351
352 conn->type = NONE;
353 memset(conn, 0, sizeof *conn);
354 }
355
356 static void
357 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
358 const struct smap *args)
359 {
360 const char *pstream = smap_get(args, "pstream");
361 const char *stream = smap_get(args, "stream");
362
363 if (pstream && stream) {
364 VLOG_WARN("Open failed: both %s and %s are configured",
365 pstream, stream);
366 return;
367 }
368
369 switch (conn->type) {
370 case PASSIVE:
371 if (pstream &&
372 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
373 return;
374 }
375 dummy_packet_conn_close(conn);
376 break;
377 case ACTIVE:
378 if (stream &&
379 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
380 return;
381 }
382 dummy_packet_conn_close(conn);
383 break;
384 case NONE:
385 default:
386 break;
387 }
388
389 if (pstream) {
390 int error;
391
392 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
393 if (error) {
394 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
395 } else {
396 conn->type = PASSIVE;
397 }
398 }
399
400 if (stream) {
401 int error;
402 struct stream *active_stream;
403 struct reconnect *reconnect;
404
405 reconnect = reconnect_create(time_msec());
406 reconnect_set_name(reconnect, stream);
407 reconnect_set_passive(reconnect, false, time_msec());
408 reconnect_enable(reconnect, time_msec());
409 reconnect_set_backoff(reconnect, 100, INT_MAX);
410 reconnect_set_probe_interval(reconnect, 0);
411 conn->u.rconn.reconnect = reconnect;
412 conn->type = ACTIVE;
413
414 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
415 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
416
417 switch (error) {
418 case 0:
419 reconnect_connected(reconnect, time_msec());
420 break;
421
422 case EAGAIN:
423 reconnect_connecting(reconnect, time_msec());
424 break;
425
426 default:
427 reconnect_connect_failed(reconnect, time_msec(), error);
428 stream_close(active_stream);
429 conn->u.rconn.rstream->stream = NULL;
430 break;
431 }
432 }
433 }
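/* Usage note (an assumption based on the generic OVS stream layer rather
 * than anything specific to this file): "pstream" and "stream" take ordinary
 * OVS stream names, so a listener might be "ptcp:9999" or
 * "punix:/tmp/p0.sock" while the active side uses "tcp:127.0.0.1:9999" or
 * "unix:/tmp/p0.sock". */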
434
435 static void
436 dummy_pconn_run(struct netdev_dummy *dev)
437 OVS_REQUIRES(dev->mutex)
438 {
439 struct stream *new_stream;
440 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
441 int error;
442 size_t i;
443
444 error = pstream_accept(pconn->pstream, &new_stream);
445 if (!error) {
446 struct dummy_packet_stream *s;
447
448 pconn->streams = xrealloc(pconn->streams,
449 ((pconn->n_streams + 1)
450 * sizeof s));
451 s = xmalloc(sizeof *s);
452 pconn->streams[pconn->n_streams++] = s;
453 dummy_packet_stream_init(s, new_stream);
454 } else if (error != EAGAIN) {
455 VLOG_WARN("%s: accept failed (%s)",
456 pstream_get_name(pconn->pstream), ovs_strerror(error));
457 pstream_close(pconn->pstream);
458 pconn->pstream = NULL;
459 dev->conn.type = NONE;
460 }
461
462 for (i = 0; i < pconn->n_streams; ) {
463 struct dummy_packet_stream *s = pconn->streams[i];
464
465 error = dummy_packet_stream_run(dev, s);
466 if (error) {
467 VLOG_DBG("%s: closing connection (%s)",
468 stream_get_name(s->stream),
469 ovs_retval_to_string(error));
470 dummy_packet_stream_close(s);
471 free(s);
472 pconn->streams[i] = pconn->streams[--pconn->n_streams];
473 } else {
474 i++;
475 }
476 }
477 }
478
479 static void
480 dummy_rconn_run(struct netdev_dummy *dev)
481 OVS_REQUIRES(dev->mutex)
482 {
483 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
484
485 switch (reconnect_run(rconn->reconnect, time_msec())) {
486 case RECONNECT_CONNECT:
487 {
488 int error;
489
490 if (rconn->rstream->stream) {
491 error = stream_connect(rconn->rstream->stream);
492 } else {
493 error = stream_open(reconnect_get_name(rconn->reconnect),
494 &rconn->rstream->stream, DSCP_DEFAULT);
495 }
496
497 switch (error) {
498 case 0:
499 reconnect_connected(rconn->reconnect, time_msec());
500 break;
501
502 case EAGAIN:
503 reconnect_connecting(rconn->reconnect, time_msec());
504 break;
505
506 default:
507 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
508 stream_close(rconn->rstream->stream);
509 rconn->rstream->stream = NULL;
510 break;
511 }
512 }
513 break;
514
515 case RECONNECT_DISCONNECT:
516 case RECONNECT_PROBE:
517 default:
518 break;
519 }
520
521 if (reconnect_is_connected(rconn->reconnect)) {
522 int err;
523
524 err = dummy_packet_stream_run(dev, rconn->rstream);
525
526 if (err) {
527 reconnect_disconnected(rconn->reconnect, time_msec(), err);
528 stream_close(rconn->rstream->stream);
529 rconn->rstream->stream = NULL;
530 }
531 }
532 }
533
534 static void
535 dummy_packet_conn_run(struct netdev_dummy *dev)
536 OVS_REQUIRES(dev->mutex)
537 {
538 switch (dev->conn.type) {
539 case PASSIVE:
540 dummy_pconn_run(dev);
541 break;
542
543 case ACTIVE:
544 dummy_rconn_run(dev);
545 break;
546
547 case NONE:
548 default:
549 break;
550 }
551 }
552
553 static void
554 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
555 {
556 int i;
557 switch (conn->type) {
558 case PASSIVE:
559 pstream_wait(conn->u.pconn.pstream);
560 for (i = 0; i < conn->u.pconn.n_streams; i++) {
561 struct dummy_packet_stream *s = conn->u.pconn.streams[i];
562 dummy_packet_stream_wait(s);
563 }
564 break;
565 case ACTIVE:
566 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
567 dummy_packet_stream_wait(conn->u.rconn.rstream);
568 }
569 break;
570
571 case NONE:
572 default:
573 break;
574 }
575 }
576
577 static void
578 dummy_packet_conn_send(struct dummy_packet_conn *conn,
579 const void *buffer, size_t size)
580 {
581 int i;
582
583 switch (conn->type) {
584 case PASSIVE:
585 for (i = 0; i < conn->u.pconn.n_streams; i++) {
586 struct dummy_packet_stream *s = conn->u.pconn.streams[i];
587
588 dummy_packet_stream_send(s, buffer, size);
589 pstream_wait(conn->u.pconn.pstream);
590 }
591 break;
592
593 case ACTIVE:
594 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
595 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
596 dummy_packet_stream_wait(conn->u.rconn.rstream);
597 }
598 break;
599
600 case NONE:
601 default:
602 break;
603 }
604 }
605
606 static enum dummy_netdev_conn_state
607 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
608 {
609 enum dummy_netdev_conn_state state;
610
611 if (conn->type == ACTIVE) {
612 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
613 state = CONN_STATE_CONNECTED;
614 } else {
615 state = CONN_STATE_NOT_CONNECTED;
616 }
617 } else {
618 state = CONN_STATE_UNKNOWN;
619 }
620
621 return state;
622 }
623
624 static void
625 netdev_dummy_run(const struct netdev_class *netdev_class)
626 {
627 struct netdev_dummy *dev;
628
629 ovs_mutex_lock(&dummy_list_mutex);
630 LIST_FOR_EACH (dev, list_node, &dummy_list) {
631 if (netdev_get_class(&dev->up) != netdev_class) {
632 continue;
633 }
634 ovs_mutex_lock(&dev->mutex);
635 dummy_packet_conn_run(dev);
636 ovs_mutex_unlock(&dev->mutex);
637 }
638 ovs_mutex_unlock(&dummy_list_mutex);
639 }
640
641 static void
642 netdev_dummy_wait(const struct netdev_class *netdev_class)
643 {
644 struct netdev_dummy *dev;
645
646 ovs_mutex_lock(&dummy_list_mutex);
647 LIST_FOR_EACH (dev, list_node, &dummy_list) {
648 if (netdev_get_class(&dev->up) != netdev_class) {
649 continue;
650 }
651 ovs_mutex_lock(&dev->mutex);
652 dummy_packet_conn_wait(&dev->conn);
653 ovs_mutex_unlock(&dev->mutex);
654 }
655 ovs_mutex_unlock(&dummy_list_mutex);
656 }
657
658 static struct netdev *
659 netdev_dummy_alloc(void)
660 {
661 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
662 return &netdev->up;
663 }
664
665 static int
666 netdev_dummy_construct(struct netdev *netdev_)
667 {
668 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
669 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
670 unsigned int n;
671
672 n = atomic_count_inc(&next_n);
673
674 ovs_mutex_init(&netdev->mutex);
675 ovs_mutex_lock(&netdev->mutex);
676 netdev->hwaddr.ea[0] = 0xaa;
677 netdev->hwaddr.ea[1] = 0x55;
678 netdev->hwaddr.ea[2] = n >> 24;
679 netdev->hwaddr.ea[3] = n >> 16;
680 netdev->hwaddr.ea[4] = n >> 8;
681 netdev->hwaddr.ea[5] = n;
682 netdev->mtu = 1500;
683 netdev->flags = 0;
684 netdev->ifindex = -EOPNOTSUPP;
685 netdev->requested_n_rxq = netdev_->n_rxq;
686 netdev->requested_n_txq = netdev_->n_txq;
687 netdev->numa_id = 0;
688
689 dummy_packet_conn_init(&netdev->conn);
690
691 ovs_list_init(&netdev->rxes);
692 ovs_mutex_unlock(&netdev->mutex);
693
694 ovs_mutex_lock(&dummy_list_mutex);
695 ovs_list_push_back(&dummy_list, &netdev->list_node);
696 ovs_mutex_unlock(&dummy_list_mutex);
697
698 return 0;
699 }
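/* Illustration (derived from the counter above, which starts at 0xaa550000
 * and supplies hardware address bytes 2..5): successive dummy devices get
 * the MACs aa:55:aa:55:00:00, aa:55:aa:55:00:01, aa:55:aa:55:00:02, ... */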
700
701 static void
702 netdev_dummy_destruct(struct netdev *netdev_)
703 {
704 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
705
706 ovs_mutex_lock(&dummy_list_mutex);
707 ovs_list_remove(&netdev->list_node);
708 ovs_mutex_unlock(&dummy_list_mutex);
709
710 ovs_mutex_lock(&netdev->mutex);
711 dummy_packet_conn_close(&netdev->conn);
712 netdev->conn.type = NONE;
713
714 ovs_mutex_unlock(&netdev->mutex);
715 ovs_mutex_destroy(&netdev->mutex);
716 }
717
718 static void
719 netdev_dummy_dealloc(struct netdev *netdev_)
720 {
721 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
722
723 free(netdev);
724 }
725
726 static int
727 netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
728 {
729 struct netdev_dummy *netdev = netdev_dummy_cast(dev);
730
731 ovs_mutex_lock(&netdev->mutex);
732
733 if (netdev->ifindex >= 0) {
734 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
735 }
736
737 dummy_packet_conn_get_config(&netdev->conn, args);
738
739 /* 'dummy-pmd' specific config. */
740 if (!netdev_is_pmd(dev)) {
741 goto exit;
742 }
743 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
744 smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
745 smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
746 smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);
747
748 exit:
749 ovs_mutex_unlock(&netdev->mutex);
750 return 0;
751 }
752
753 static int
754 netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
755 struct in6_addr **pmask, int *n_addr)
756 {
757 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
758 int cnt = 0, i = 0, err = 0;
759 struct in6_addr *addr, *mask;
760
761 ovs_mutex_lock(&netdev->mutex);
762 if (netdev->address.s_addr != INADDR_ANY) {
763 cnt++;
764 }
765
766 if (ipv6_addr_is_set(&netdev->ipv6)) {
767 cnt++;
768 }
769 if (!cnt) {
770 err = EADDRNOTAVAIL;
771 goto out;
772 }
773 addr = xmalloc(sizeof *addr * cnt);
774 mask = xmalloc(sizeof *mask * cnt);
775 if (netdev->address.s_addr != INADDR_ANY) {
776 in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
777 in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
778 i++;
779 }
780
781 if (ipv6_addr_is_set(&netdev->ipv6)) {
782 memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
783 memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
784 i++;
785 }
786 if (paddr) {
787 *paddr = addr;
788 *pmask = mask;
789 *n_addr = cnt;
790 } else {
791 free(addr);
792 free(mask);
793 }
794 out:
795 ovs_mutex_unlock(&netdev->mutex);
796
797 return err;
798 }
799
800 static int
801 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
802 struct in_addr netmask)
803 {
804 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
805
806 ovs_mutex_lock(&netdev->mutex);
807 netdev->address = address;
808 netdev->netmask = netmask;
809 netdev_change_seq_changed(netdev_);
810 ovs_mutex_unlock(&netdev->mutex);
811
812 return 0;
813 }
814
815 static int
816 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
817 struct in6_addr *mask)
818 {
819 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
820
821 ovs_mutex_lock(&netdev->mutex);
822 netdev->ipv6 = *in6;
823 netdev->ipv6_mask = *mask;
824 netdev_change_seq_changed(netdev_);
825 ovs_mutex_unlock(&netdev->mutex);
826
827 return 0;
828 }
829
830 #define DUMMY_MAX_QUEUES_PER_PORT 1024
831
832 static int
833 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
834 char **errp OVS_UNUSED)
835 {
836 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
837 const char *pcap;
838 int new_n_rxq, new_n_txq, new_numa_id;
839
840 ovs_mutex_lock(&netdev->mutex);
841 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
842
843 dummy_packet_conn_set_config(&netdev->conn, args);
844
845 if (netdev->rxq_pcap) {
846 fclose(netdev->rxq_pcap);
847 }
848 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
849 fclose(netdev->tx_pcap);
850 }
851 netdev->rxq_pcap = netdev->tx_pcap = NULL;
852 pcap = smap_get(args, "pcap");
853 if (pcap) {
854 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
855 } else {
856 const char *rxq_pcap = smap_get(args, "rxq_pcap");
857 const char *tx_pcap = smap_get(args, "tx_pcap");
858
859 if (rxq_pcap) {
860 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
861 }
862 if (tx_pcap) {
863 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
864 }
865 }
866
867 netdev_change_seq_changed(netdev_);
868
869 /* 'dummy-pmd' specific config. */
870 if (!netdev_->netdev_class->is_pmd) {
871 goto exit;
872 }
873
874 new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
875 new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);
876
877 if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
878 new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
879 VLOG_WARN("The one or both of interface %s queues"
880 "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
881 netdev_get_name(netdev_),
882 new_n_rxq,
883 new_n_txq,
884 DUMMY_MAX_QUEUES_PER_PORT,
885 DUMMY_MAX_QUEUES_PER_PORT);
886
887 new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
888 new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
889 }
890
891 new_numa_id = smap_get_int(args, "numa_id", 0);
892 if (new_n_rxq != netdev->requested_n_rxq
893 || new_n_txq != netdev->requested_n_txq
894 || new_numa_id != netdev->requested_numa_id) {
895 netdev->requested_n_rxq = new_n_rxq;
896 netdev->requested_n_txq = new_n_txq;
897 netdev->requested_numa_id = new_numa_id;
898 netdev_request_reconfigure(netdev_);
899 }
900
901 exit:
902 ovs_mutex_unlock(&netdev->mutex);
903 return 0;
904 }
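/* Example (hedged: the option names come from the smap_get() calls above;
 * the ovs-vsctl invocation is an assumed but typical way to set them):
 *
 *     ovs-vsctl set Interface p1 type=dummy \
 *         options:pcap=p1.pcap options:pstream=punix:/tmp/p1.sock
 *
 * "pcap" captures both directions to one file, while "rxq_pcap" and
 * "tx_pcap" split them; "n_rxq", "n_txq", and "numa_id" only take effect
 * for dummy-pmd devices. */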
905
906 static int
907 netdev_dummy_get_numa_id(const struct netdev *netdev_)
908 {
909 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
910
911 ovs_mutex_lock(&netdev->mutex);
912 int numa_id = netdev->numa_id;
913 ovs_mutex_unlock(&netdev->mutex);
914
915 return numa_id;
916 }
917
918 /* Sets the number of tx queues and rx queues for the dummy PMD interface. */
919 static int
920 netdev_dummy_reconfigure(struct netdev *netdev_)
921 {
922 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
923
924 ovs_mutex_lock(&netdev->mutex);
925
926 netdev_->n_txq = netdev->requested_n_txq;
927 netdev_->n_rxq = netdev->requested_n_rxq;
928 netdev->numa_id = netdev->requested_numa_id;
929
930 ovs_mutex_unlock(&netdev->mutex);
931 return 0;
932 }
933
934 static struct netdev_rxq *
935 netdev_dummy_rxq_alloc(void)
936 {
937 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
938 return &rx->up;
939 }
940
941 static int
942 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
943 {
944 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
945 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
946
947 ovs_mutex_lock(&netdev->mutex);
948 ovs_list_push_back(&netdev->rxes, &rx->node);
949 ovs_list_init(&rx->recv_queue);
950 rx->recv_queue_len = 0;
951 rx->seq = seq_create();
952 ovs_mutex_unlock(&netdev->mutex);
953
954 return 0;
955 }
956
957 static void
958 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
959 {
960 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
961 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
962
963 ovs_mutex_lock(&netdev->mutex);
964 ovs_list_remove(&rx->node);
965 pkt_list_delete(&rx->recv_queue);
966 ovs_mutex_unlock(&netdev->mutex);
967 seq_destroy(rx->seq);
968 }
969
970 static void
971 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
972 {
973 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
974
975 free(rx);
976 }
977
978 static int
979 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
980 {
981 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
982 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
983 struct dp_packet *packet;
984
985 ovs_mutex_lock(&netdev->mutex);
986 if (!ovs_list_is_empty(&rx->recv_queue)) {
987 struct pkt_list_node *pkt_node;
988
989 ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
990 packet = pkt_node->pkt;
991 free(pkt_node);
992 rx->recv_queue_len--;
993 } else {
994 packet = NULL;
995 }
996 ovs_mutex_unlock(&netdev->mutex);
997
998 if (!packet) {
999 if (netdev_is_pmd(&netdev->up)) {
1000 /* If 'netdev' is a PMD device, this is called as part of the PMD
1001 * thread busy loop. We yield here (without quiescing) for two
1002 * reasons:
1003 *
1004 * - To reduce the CPU utilization during the testsuite
1005 * - To give valgrind a chance to switch thread. According
1006 * to the valgrind documentation, there's a big lock that
1007 * prevents multiple thread from being executed at the same
1008 * time. On my system, without this sleep, the pmd threads
1009 * testcases fail under valgrind, because ovs-vswitchd becomes
1010 * unresponsive. */
1011 sched_yield();
1012 }
1013 return EAGAIN;
1014 }
1015 ovs_mutex_lock(&netdev->mutex);
1016 netdev->stats.rx_packets++;
1017 netdev->stats.rx_bytes += dp_packet_size(packet);
1018 ovs_mutex_unlock(&netdev->mutex);
1019
1020 batch->packets[0] = packet;
1021 batch->count = 1;
1022 return 0;
1023 }
1024
1025 static void
1026 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
1027 {
1028 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1029 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1030 uint64_t seq = seq_read(rx->seq);
1031
1032 ovs_mutex_lock(&netdev->mutex);
1033 if (!ovs_list_is_empty(&rx->recv_queue)) {
1034 poll_immediate_wake();
1035 } else {
1036 seq_wait(rx->seq, seq);
1037 }
1038 ovs_mutex_unlock(&netdev->mutex);
1039 }
1040
1041 static int
1042 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
1043 {
1044 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1045 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1046
1047 ovs_mutex_lock(&netdev->mutex);
1048 pkt_list_delete(&rx->recv_queue);
1049 rx->recv_queue_len = 0;
1050 ovs_mutex_unlock(&netdev->mutex);
1051
1052 seq_change(rx->seq);
1053
1054 return 0;
1055 }
1056
1057 static int
1058 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
1059 struct dp_packet_batch *batch, bool may_steal,
1060 bool concurrent_txq OVS_UNUSED)
1061 {
1062 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1063 int error = 0;
1064
1065 struct dp_packet *packet;
1066 DP_PACKET_BATCH_FOR_EACH(packet, batch) {
1067 const void *buffer = dp_packet_data(packet);
1068 size_t size = dp_packet_size(packet);
1069
1070 if (packet->packet_type != htonl(PT_ETH)) {
1071 error = EPFNOSUPPORT;
1072 break;
1073 }
1074
1075 size -= dp_packet_get_cutlen(packet);
1076
1077 if (size < ETH_HEADER_LEN) {
1078 error = EMSGSIZE;
1079 break;
1080 } else {
1081 const struct eth_header *eth = buffer;
1082 int max_size;
1083
1084 ovs_mutex_lock(&dev->mutex);
1085 max_size = dev->mtu + ETH_HEADER_LEN;
1086 ovs_mutex_unlock(&dev->mutex);
1087
1088 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1089 max_size += VLAN_HEADER_LEN;
1090 }
1091 if (size > max_size) {
1092 error = EMSGSIZE;
1093 break;
1094 }
1095 }
1096
1097 ovs_mutex_lock(&dev->mutex);
1098 dev->stats.tx_packets++;
1099 dev->stats.tx_bytes += size;
1100
1101 dummy_packet_conn_send(&dev->conn, buffer, size);
1102
1103 /* Reply to ARP requests for 'dev''s assigned IP address. */
1104 if (dev->address.s_addr) {
1105 struct dp_packet packet;
1106 struct flow flow;
1107
1108 dp_packet_use_const(&packet, buffer, size);
1109 flow_extract(&packet, &flow);
1110 if (flow.dl_type == htons(ETH_TYPE_ARP)
1111 && flow.nw_proto == ARP_OP_REQUEST
1112 && flow.nw_dst == dev->address.s_addr) {
1113 struct dp_packet *reply = dp_packet_new(0);
1114 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
1115 false, flow.nw_dst, flow.nw_src);
1116 netdev_dummy_queue_packet(dev, reply, 0);
1117 }
1118 }
1119
1120 if (dev->tx_pcap) {
1121 struct dp_packet packet;
1122
1123 dp_packet_use_const(&packet, buffer, size);
1124 ovs_pcap_write(dev->tx_pcap, &packet);
1125 fflush(dev->tx_pcap);
1126 }
1127
1128 ovs_mutex_unlock(&dev->mutex);
1129 }
1130
1131 dp_packet_delete_batch(batch, may_steal);
1132
1133 return error;
1134 }
1135
1136 static int
1137 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1138 {
1139 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1140
1141 ovs_mutex_lock(&dev->mutex);
1142 if (!eth_addr_equals(dev->hwaddr, mac)) {
1143 dev->hwaddr = mac;
1144 netdev_change_seq_changed(netdev);
1145 }
1146 ovs_mutex_unlock(&dev->mutex);
1147
1148 return 0;
1149 }
1150
1151 static int
1152 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1153 {
1154 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1155
1156 ovs_mutex_lock(&dev->mutex);
1157 *mac = dev->hwaddr;
1158 ovs_mutex_unlock(&dev->mutex);
1159
1160 return 0;
1161 }
1162
1163 static int
1164 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1165 {
1166 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1167
1168 ovs_mutex_lock(&dev->mutex);
1169 *mtup = dev->mtu;
1170 ovs_mutex_unlock(&dev->mutex);
1171
1172 return 0;
1173 }
1174
1175 #define DUMMY_MIN_MTU 68
1176 #define DUMMY_MAX_MTU 65535
1177
1178 static int
1179 netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
1180 {
1181 if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
1182 return EINVAL;
1183 }
1184
1185 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1186
1187 ovs_mutex_lock(&dev->mutex);
1188 if (dev->mtu != mtu) {
1189 dev->mtu = mtu;
1190 netdev_change_seq_changed(netdev);
1191 }
1192 ovs_mutex_unlock(&dev->mutex);
1193
1194 return 0;
1195 }
1196
1197 static int
1198 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1199 {
1200 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1201
1202 ovs_mutex_lock(&dev->mutex);
1203 /* Report only the counters that the dummy device collects. */
1204 stats->tx_packets = dev->stats.tx_packets;
1205 stats->tx_bytes = dev->stats.tx_bytes;
1206 stats->rx_packets = dev->stats.rx_packets;
1207 stats->rx_bytes = dev->stats.rx_bytes;
1208 ovs_mutex_unlock(&dev->mutex);
1209
1210 return 0;
1211 }
1212
1213 static int
1214 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1215 unsigned int queue_id, struct smap *details OVS_UNUSED)
1216 {
1217 if (queue_id == 0) {
1218 return 0;
1219 } else {
1220 return EINVAL;
1221 }
1222 }
1223
1224 static void
1225 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1226 {
1227 *stats = (struct netdev_queue_stats) {
1228 .tx_bytes = UINT64_MAX,
1229 .tx_packets = UINT64_MAX,
1230 .tx_errors = UINT64_MAX,
1231 .created = LLONG_MIN,
1232 };
1233 }
1234
1235 static int
1236 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1237 unsigned int queue_id,
1238 struct netdev_queue_stats *stats)
1239 {
1240 if (queue_id == 0) {
1241 netdev_dummy_init_queue_stats(stats);
1242 return 0;
1243 } else {
1244 return EINVAL;
1245 }
1246 }
1247
1248 struct netdev_dummy_queue_state {
1249 unsigned int next_queue;
1250 };
1251
1252 static int
1253 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1254 void **statep)
1255 {
1256 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1257 state->next_queue = 0;
1258 *statep = state;
1259 return 0;
1260 }
1261
1262 static int
1263 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1264 void *state_,
1265 unsigned int *queue_id,
1266 struct smap *details OVS_UNUSED)
1267 {
1268 struct netdev_dummy_queue_state *state = state_;
1269 if (state->next_queue == 0) {
1270 *queue_id = 0;
1271 state->next_queue++;
1272 return 0;
1273 } else {
1274 return EOF;
1275 }
1276 }
1277
1278 static int
1279 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1280 void *state)
1281 {
1282 free(state);
1283 return 0;
1284 }
1285
1286 static int
1287 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1288 void (*cb)(unsigned int queue_id,
1289 struct netdev_queue_stats *,
1290 void *aux),
1291 void *aux)
1292 {
1293 struct netdev_queue_stats stats;
1294 netdev_dummy_init_queue_stats(&stats);
1295 cb(0, &stats, aux);
1296 return 0;
1297 }
1298
1299 static int
1300 netdev_dummy_get_ifindex(const struct netdev *netdev)
1301 {
1302 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1303 int ifindex;
1304
1305 ovs_mutex_lock(&dev->mutex);
1306 ifindex = dev->ifindex;
1307 ovs_mutex_unlock(&dev->mutex);
1308
1309 return ifindex;
1310 }
1311
1312 static int
1313 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1314 enum netdev_flags off, enum netdev_flags on,
1315 enum netdev_flags *old_flagsp)
1316 OVS_REQUIRES(netdev->mutex)
1317 {
1318 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1319 return EINVAL;
1320 }
1321
1322 *old_flagsp = netdev->flags;
1323 netdev->flags |= on;
1324 netdev->flags &= ~off;
1325 if (*old_flagsp != netdev->flags) {
1326 netdev_change_seq_changed(&netdev->up);
1327 }
1328
1329 return 0;
1330 }
1331
1332 static int
1333 netdev_dummy_update_flags(struct netdev *netdev_,
1334 enum netdev_flags off, enum netdev_flags on,
1335 enum netdev_flags *old_flagsp)
1336 {
1337 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1338 int error;
1339
1340 ovs_mutex_lock(&netdev->mutex);
1341 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1342 ovs_mutex_unlock(&netdev->mutex);
1343
1344 return error;
1345 }
1346 \f
1347 /* Helper functions. */
1348
1349 #define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE) \
1350 { \
1351 NAME, \
1352 PMD, /* is_pmd */ \
1353 NULL, /* init */ \
1354 netdev_dummy_run, \
1355 netdev_dummy_wait, \
1356 \
1357 netdev_dummy_alloc, \
1358 netdev_dummy_construct, \
1359 netdev_dummy_destruct, \
1360 netdev_dummy_dealloc, \
1361 netdev_dummy_get_config, \
1362 netdev_dummy_set_config, \
1363 NULL, /* get_tunnel_config */ \
1364 NULL, /* build header */ \
1365 NULL, /* push header */ \
1366 NULL, /* pop header */ \
1367 netdev_dummy_get_numa_id, \
1368 NULL, /* set_tx_multiq */ \
1369 \
1370 netdev_dummy_send, /* send */ \
1371 NULL, /* send_wait */ \
1372 \
1373 netdev_dummy_set_etheraddr, \
1374 netdev_dummy_get_etheraddr, \
1375 netdev_dummy_get_mtu, \
1376 netdev_dummy_set_mtu, \
1377 netdev_dummy_get_ifindex, \
1378 NULL, /* get_carrier */ \
1379 NULL, /* get_carrier_resets */ \
1380 NULL, /* get_miimon */ \
1381 netdev_dummy_get_stats, \
1382 \
1383 NULL, /* get_features */ \
1384 NULL, /* set_advertisements */ \
1385 \
1386 NULL, /* set_policing */ \
1387 NULL, /* get_qos_types */ \
1388 NULL, /* get_qos_capabilities */ \
1389 NULL, /* get_qos */ \
1390 NULL, /* set_qos */ \
1391 netdev_dummy_get_queue, \
1392 NULL, /* set_queue */ \
1393 NULL, /* delete_queue */ \
1394 netdev_dummy_get_queue_stats, \
1395 netdev_dummy_queue_dump_start, \
1396 netdev_dummy_queue_dump_next, \
1397 netdev_dummy_queue_dump_done, \
1398 netdev_dummy_dump_queue_stats, \
1399 \
1400 NULL, /* set_in4 */ \
1401 netdev_dummy_get_addr_list, \
1402 NULL, /* add_router */ \
1403 NULL, /* get_next_hop */ \
1404 NULL, /* get_status */ \
1405 NULL, /* arp_lookup */ \
1406 \
1407 netdev_dummy_update_flags, \
1408 RECONFIGURE, \
1409 \
1410 netdev_dummy_rxq_alloc, \
1411 netdev_dummy_rxq_construct, \
1412 netdev_dummy_rxq_destruct, \
1413 netdev_dummy_rxq_dealloc, \
1414 netdev_dummy_rxq_recv, \
1415 netdev_dummy_rxq_wait, \
1416 netdev_dummy_rxq_drain, \
1417 \
1418 NO_OFFLOAD_API \
1419 }
1420
1421 static const struct netdev_class dummy_class =
1422 NETDEV_DUMMY_CLASS("dummy", false, NULL);
1423
1424 static const struct netdev_class dummy_internal_class =
1425 NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);
1426
1427 static const struct netdev_class dummy_pmd_class =
1428 NETDEV_DUMMY_CLASS("dummy-pmd", true,
1429 netdev_dummy_reconfigure);
1430
1431 static void
1432 pkt_list_delete(struct ovs_list *l)
1433 {
1434 struct pkt_list_node *pkt;
1435
1436 LIST_FOR_EACH_POP(pkt, list_node, l) {
1437 dp_packet_delete(pkt->pkt);
1438 free(pkt);
1439 }
1440 }
1441
1442 static struct dp_packet *
1443 eth_from_packet(const char *s)
1444 {
1445 struct dp_packet *packet;
1446 eth_from_hex(s, &packet);
1447 return packet;
1448 }
1449
1450 static struct dp_packet *
1451 eth_from_flow(const char *s)
1452 {
1453 enum odp_key_fitness fitness;
1454 struct dp_packet *packet;
1455 struct ofpbuf odp_key;
1456 struct flow flow;
1457 int error;
1458
1459 /* Convert string to datapath key.
1460 *
1461 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1462 * the code for that currently calls exit() on parse error. We have to
1463 * settle for parsing a datapath key for now.
1464 */
1465 ofpbuf_init(&odp_key, 0);
1466 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1467 if (error) {
1468 ofpbuf_uninit(&odp_key);
1469 return NULL;
1470 }
1471
1472 /* Convert odp_key to flow. */
1473 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1474 if (fitness == ODP_FIT_ERROR) {
1475 ofpbuf_uninit(&odp_key);
1476 return NULL;
1477 }
1478
1479 packet = dp_packet_new(0);
1480 flow_compose(packet, &flow);
1481
1482 ofpbuf_uninit(&odp_key);
1483 return packet;
1484 }
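/* Example (hedged, from the general ODP flow syntax rather than this file):
 * a string acceptable to odp_flow_from_string() might look like
 *
 *     in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),
 *     eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,
 *     ttl=64,frag=no),icmp(type=8,code=0)
 *
 * from which flow_compose() above synthesizes a matching packet. */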
1485
1486 static void
1487 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1488 {
1489 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1490
1491 pkt_node->pkt = packet;
1492 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1493 rx->recv_queue_len++;
1494 seq_change(rx->seq);
1495 }
1496
1497 static void
1498 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
1499 int queue_id)
1500 OVS_REQUIRES(dummy->mutex)
1501 {
1502 struct netdev_rxq_dummy *rx, *prev;
1503
1504 if (dummy->rxq_pcap) {
1505 ovs_pcap_write(dummy->rxq_pcap, packet);
1506 fflush(dummy->rxq_pcap);
1507 }
1508 prev = NULL;
1509 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1510 if (rx->up.queue_id == queue_id &&
1511 rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1512 if (prev) {
1513 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1514 }
1515 prev = rx;
1516 }
1517 }
1518 if (prev) {
1519 netdev_dummy_queue_packet__(prev, packet);
1520 } else {
1521 dp_packet_delete(packet);
1522 }
1523 }
1524
1525 static void
1526 netdev_dummy_receive(struct unixctl_conn *conn,
1527 int argc, const char *argv[], void *aux OVS_UNUSED)
1528 {
1529 struct netdev_dummy *dummy_dev;
1530 struct netdev *netdev;
1531 int i, k = 1, rx_qid = 0;
1532
1533 netdev = netdev_from_name(argv[k++]);
1534 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1535 unixctl_command_reply_error(conn, "no such dummy netdev");
1536 goto exit_netdev;
1537 }
1538 dummy_dev = netdev_dummy_cast(netdev);
1539
1540 ovs_mutex_lock(&dummy_dev->mutex);
1541
1542 if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
1543 rx_qid = strtol(argv[k + 1], NULL, 10);
1544 if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
1545 unixctl_command_reply_error(conn, "bad rx queue id.");
1546 goto exit;
1547 }
1548 k += 2;
1549 }
1550
1551 for (i = k; i < argc; i++) {
1552 struct dp_packet *packet;
1553
1554 /* Try to parse 'argv[i]' as packet in hex. */
1555 packet = eth_from_packet(argv[i]);
1556
1557 if (!packet) {
1558 /* Try parse 'argv[i]' as odp flow. */
1559 packet = eth_from_flow(argv[i]);
1560
1561 if (!packet) {
1562 unixctl_command_reply_error(conn, "bad packet or flow syntax");
1563 goto exit;
1564 }
1565
1566 /* Parse the optional --len argument that immediately follows a 'flow'. */
1567 if (argc >= i + 3 && !strcmp(argv[i + 1], "--len")) {
1568 int packet_size = strtol(argv[i + 2], NULL, 10);
1569 dp_packet_set_size(packet, packet_size);
1570 i += 2;
1571 }
1572 }
1573
1574 netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
1575 }
1576
1577 unixctl_command_reply(conn, NULL);
1578
1579 exit:
1580 ovs_mutex_unlock(&dummy_dev->mutex);
1581 exit_netdev:
1582 netdev_close(netdev);
1583 }
1584
1585 static void
1586 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1587 OVS_REQUIRES(dev->mutex)
1588 {
1589 enum netdev_flags old_flags;
1590
1591 if (admin_state) {
1592 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1593 } else {
1594 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1595 }
1596 }
1597
1598 static void
1599 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1600 const char *argv[], void *aux OVS_UNUSED)
1601 {
1602 bool up;
1603
1604 if (!strcasecmp(argv[argc - 1], "up")) {
1605 up = true;
1606 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1607 up = false;
1608 } else {
1609 unixctl_command_reply_error(conn, "Invalid Admin State");
1610 return;
1611 }
1612
1613 if (argc > 2) {
1614 struct netdev *netdev = netdev_from_name(argv[1]);
1615 if (netdev && is_dummy_class(netdev->netdev_class)) {
1616 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1617
1618 ovs_mutex_lock(&dummy_dev->mutex);
1619 netdev_dummy_set_admin_state__(dummy_dev, up);
1620 ovs_mutex_unlock(&dummy_dev->mutex);
1621
1622 netdev_close(netdev);
1623 } else {
1624 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1625 netdev_close(netdev);
1626 return;
1627 }
1628 } else {
1629 struct netdev_dummy *netdev;
1630
1631 ovs_mutex_lock(&dummy_list_mutex);
1632 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1633 ovs_mutex_lock(&netdev->mutex);
1634 netdev_dummy_set_admin_state__(netdev, up);
1635 ovs_mutex_unlock(&netdev->mutex);
1636 }
1637 ovs_mutex_unlock(&dummy_list_mutex);
1638 }
1639 unixctl_command_reply(conn, "OK");
1640 }
1641
1642 static void
1643 display_conn_state__(struct ds *s, const char *name,
1644 enum dummy_netdev_conn_state state)
1645 {
1646 ds_put_format(s, "%s: ", name);
1647
1648 switch (state) {
1649 case CONN_STATE_CONNECTED:
1650 ds_put_cstr(s, "connected\n");
1651 break;
1652
1653 case CONN_STATE_NOT_CONNECTED:
1654 ds_put_cstr(s, "disconnected\n");
1655 break;
1656
1657 case CONN_STATE_UNKNOWN:
1658 default:
1659 ds_put_cstr(s, "unknown\n");
1660 break;
1661 };
1662 }
1663
1664 static void
1665 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1666 const char *argv[], void *aux OVS_UNUSED)
1667 {
1668 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1669 struct ds s;
1670
1671 ds_init(&s);
1672
1673 if (argc > 1) {
1674 const char *dev_name = argv[1];
1675 struct netdev *netdev = netdev_from_name(dev_name);
1676
1677 if (netdev && is_dummy_class(netdev->netdev_class)) {
1678 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1679
1680 ovs_mutex_lock(&dummy_dev->mutex);
1681 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1682 ovs_mutex_unlock(&dummy_dev->mutex);
1683
1684 netdev_close(netdev);
1685 }
1686 display_conn_state__(&s, dev_name, state);
1687 } else {
1688 struct netdev_dummy *netdev;
1689
1690 ovs_mutex_lock(&dummy_list_mutex);
1691 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1692 ovs_mutex_lock(&netdev->mutex);
1693 state = dummy_netdev_get_conn_state(&netdev->conn);
1694 ovs_mutex_unlock(&netdev->mutex);
1695 if (state != CONN_STATE_UNKNOWN) {
1696 display_conn_state__(&s, netdev->up.name, state);
1697 }
1698 }
1699 ovs_mutex_unlock(&dummy_list_mutex);
1700 }
1701
1702 unixctl_command_reply(conn, ds_cstr(&s));
1703 ds_destroy(&s);
1704 }
1705
1706 static void
1707 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1708 const char *argv[], void *aux OVS_UNUSED)
1709 {
1710 struct netdev *netdev = netdev_from_name(argv[1]);
1711
1712 if (netdev && is_dummy_class(netdev->netdev_class)) {
1713 struct in_addr ip, mask;
1714 char *error;
1715
1716 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1717 if (!error) {
1718 netdev_dummy_set_in4(netdev, ip, mask);
1719 unixctl_command_reply(conn, "OK");
1720 } else {
1721 unixctl_command_reply_error(conn, error);
1722 free(error);
1723 }
1724 } else {
1725 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1726 }
1727
1728 netdev_close(netdev);
1729 }
1730
1731 static void
1732 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1733 const char *argv[], void *aux OVS_UNUSED)
1734 {
1735 struct netdev *netdev = netdev_from_name(argv[1]);
1736
1737 if (netdev && is_dummy_class(netdev->netdev_class)) {
1738 struct in6_addr ip6;
1739 char *error;
1740 uint32_t plen;
1741
1742 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1743 if (!error) {
1744 struct in6_addr mask;
1745
1746 mask = ipv6_create_mask(plen);
1747 netdev_dummy_set_in6(netdev, &ip6, &mask);
1748 unixctl_command_reply(conn, "OK");
1749 } else {
1750 unixctl_command_reply_error(conn, error);
1751 free(error);
1752 }
1753
1754 } else {
1755 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1756 }
1757
1758 netdev_close(netdev);
1759 }
1760
1761
1762 static void
1763 netdev_dummy_override(const char *type)
1764 {
1765 if (!netdev_unregister_provider(type)) {
1766 struct netdev_class *class;
1767 int error;
1768
1769 class = xmemdup(&dummy_class, sizeof dummy_class);
1770 class->type = xstrdup(type);
1771 error = netdev_register_provider(class);
1772 if (error) {
1773 VLOG_ERR("%s: failed to register netdev provider (%s)",
1774 type, ovs_strerror(error));
1775 free(CONST_CAST(char *, class->type));
1776 free(class);
1777 }
1778 }
1779 }
1780
1781 void
1782 netdev_dummy_register(enum dummy_level level)
1783 {
1784 unixctl_command_register("netdev-dummy/receive",
1785 "name [--qid queue_id] packet|flow [--len packet_len]",
1786 2, INT_MAX, netdev_dummy_receive, NULL);
1787 unixctl_command_register("netdev-dummy/set-admin-state",
1788 "[netdev] up|down", 1, 2,
1789 netdev_dummy_set_admin_state, NULL);
1790 unixctl_command_register("netdev-dummy/conn-state",
1791 "[netdev]", 0, 1,
1792 netdev_dummy_conn_state, NULL);
1793 unixctl_command_register("netdev-dummy/ip4addr",
1794 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1795 netdev_dummy_ip4addr, NULL);
1796 unixctl_command_register("netdev-dummy/ip6addr",
1797 "[netdev] ip6addr", 2, 2,
1798 netdev_dummy_ip6addr, NULL);
1799
1800 if (level == DUMMY_OVERRIDE_ALL) {
1801 struct sset types;
1802 const char *type;
1803
1804 sset_init(&types);
1805 netdev_enumerate_types(&types);
1806 SSET_FOR_EACH (type, &types) {
1807 if (strcmp(type, "patch")) {
1808 netdev_dummy_override(type);
1809 }
1810 }
1811 sset_destroy(&types);
1812 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1813 netdev_dummy_override("system");
1814 }
1815 netdev_register_provider(&dummy_class);
1816 netdev_register_provider(&dummy_internal_class);
1817 netdev_register_provider(&dummy_pmd_class);
1818
1819 netdev_vport_tunnel_register();
1820 }
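/* Quick reference (an editor's summary of the registrations above; see the
 * handlers for the exact argument handling):
 *
 *     ovs-appctl netdev-dummy/receive NAME [--qid QID] PACKET|FLOW [--len N]
 *     ovs-appctl netdev-dummy/set-admin-state [NAME] up|down
 *     ovs-appctl netdev-dummy/conn-state [NAME]
 *     ovs-appctl netdev-dummy/ip4addr NAME ADDR/PREFIX
 *     ovs-appctl netdev-dummy/ip6addr NAME ADDR/PREFIX
 */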