1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22 #include <unistd.h>
23
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
26 #include "flow.h"
27 #include "netdev-offload-provider.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
30 #include "odp-util.h"
31 #include "openvswitch/dynamic-string.h"
32 #include "openvswitch/list.h"
33 #include "openvswitch/match.h"
34 #include "openvswitch/ofp-print.h"
35 #include "openvswitch/ofpbuf.h"
36 #include "openvswitch/vlog.h"
37 #include "ovs-atomic.h"
38 #include "packets.h"
39 #include "pcap-file.h"
40 #include "openvswitch/poll-loop.h"
41 #include "openvswitch/shash.h"
42 #include "sset.h"
43 #include "stream.h"
44 #include "unaligned.h"
45 #include "timeval.h"
46 #include "unixctl.h"
47 #include "reconnect.h"
48
49 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
50
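/* Number of custom statistics counters kept per dummy device
 * (see 'custom_stats' in struct netdev_dummy below). */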
51 #define C_STATS_SIZE 2
52
53 struct reconnect;
54
55 struct dummy_packet_stream {
56 struct stream *stream;
57 struct ovs_list txq;
58 struct dp_packet rxbuf;
59 };
60
61 enum dummy_packet_conn_type {
62 NONE, /* No connection is configured. */
63 PASSIVE, /* Listener. */
64 ACTIVE /* Connect to listener. */
65 };
66
67 enum dummy_netdev_conn_state {
68 CONN_STATE_CONNECTED, /* Connected to the listener. */
69 CONN_STATE_NOT_CONNECTED, /* Not connected to the listener. */
70 CONN_STATE_UNKNOWN, /* No relevant information. */
71 };
72
73 struct dummy_packet_pconn {
74 struct pstream *pstream;
75 struct dummy_packet_stream **streams;
76 size_t n_streams;
77 };
78
79 struct dummy_packet_rconn {
80 struct dummy_packet_stream *rstream;
81 struct reconnect *reconnect;
82 };
83
84 struct dummy_packet_conn {
85 enum dummy_packet_conn_type type;
86 union {
87 struct dummy_packet_pconn pconn;
88 struct dummy_packet_rconn rconn;
89 };
90 };
91
92 struct pkt_list_node {
93 struct dp_packet *pkt;
94 struct ovs_list list_node;
95 };
96
97 struct offloaded_flow {
98 struct hmap_node node;
99 ovs_u128 ufid;
100 struct match match;
101 uint32_t mark;
102 };
103
104 /* Protects 'dummy_list'. */
105 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
106
107 /* Contains all 'struct dummy_dev's. */
108 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
109 = OVS_LIST_INITIALIZER(&dummy_list);
110
111 struct netdev_dummy {
112 struct netdev up;
113
114 /* In dummy_list. */
115 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
116
117 /* Protects all members below. */
118 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
119
120 struct eth_addr hwaddr OVS_GUARDED;
121 int mtu OVS_GUARDED;
122 struct netdev_stats stats OVS_GUARDED;
123 struct netdev_custom_counter custom_stats[C_STATS_SIZE] OVS_GUARDED;
124 enum netdev_flags flags OVS_GUARDED;
125 int ifindex OVS_GUARDED;
126 int numa_id OVS_GUARDED;
127
128 struct dummy_packet_conn conn OVS_GUARDED;
129
130 struct pcap_file *tx_pcap, *rxq_pcap OVS_GUARDED;
131
132 struct in_addr address, netmask;
133 struct in6_addr ipv6, ipv6_mask;
134 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
135
136 struct hmap offloaded_flows OVS_GUARDED;
137
138 /* The following properties are for dummy-pmd and they cannot be changed
139 * when a device is running, so we remember the request and update them
140 * next time netdev_dummy_reconfigure() is called. */
141 int requested_n_txq OVS_GUARDED;
142 int requested_n_rxq OVS_GUARDED;
143 int requested_numa_id OVS_GUARDED;
144 };
145
146 /* Max 'recv_queue_len' in struct netdev_rxq_dummy. */
147 #define NETDEV_DUMMY_MAX_QUEUE 100
148
149 struct netdev_rxq_dummy {
150 struct netdev_rxq up;
151 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
152 struct ovs_list recv_queue;
153 int recv_queue_len; /* ovs_list_size(&recv_queue). */
154 struct seq *seq; /* Reports newly queued packets. */
155 };
156
157 static unixctl_cb_func netdev_dummy_set_admin_state;
158 static int netdev_dummy_construct(struct netdev *);
159 static void netdev_dummy_queue_packet(struct netdev_dummy *,
160 struct dp_packet *, struct flow *, int);
161
162 static void dummy_packet_stream_close(struct dummy_packet_stream *);
163
164 static void pkt_list_delete(struct ovs_list *);
165
166 static bool
167 is_dummy_class(const struct netdev_class *class)
168 {
169 return class->construct == netdev_dummy_construct;
170 }
171
172 static struct netdev_dummy *
173 netdev_dummy_cast(const struct netdev *netdev)
174 {
175 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
176 return CONTAINER_OF(netdev, struct netdev_dummy, up);
177 }
178
179 static struct netdev_rxq_dummy *
180 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
181 {
182 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
183 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
184 }
185
186 static void
187 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
188 {
189 int rxbuf_size = stream ? 2048 : 0;
190 s->stream = stream;
191 dp_packet_init(&s->rxbuf, rxbuf_size);
192 ovs_list_init(&s->txq);
193 }
194
195 static struct dummy_packet_stream *
196 dummy_packet_stream_create(struct stream *stream)
197 {
198 struct dummy_packet_stream *s;
199
200 s = xzalloc(sizeof *s);
201 dummy_packet_stream_init(s, stream);
202
203 return s;
204 }
205
206 static void
207 dummy_packet_stream_wait(struct dummy_packet_stream *s)
208 {
209 stream_run_wait(s->stream);
210 if (!ovs_list_is_empty(&s->txq)) {
211 stream_send_wait(s->stream);
212 }
213 stream_recv_wait(s->stream);
214 }
215
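/* The stream side channel uses a minimal length-prefixed framing: each
 * Ethernet frame on the wire is preceded by its length as a 2-byte
 * big-endian integer. dummy_packet_stream_send() prepends that header when
 * queuing a frame for transmission, and dummy_packet_stream_run() reads the
 * header first, then the payload, before handing the frame to the device. */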
216 static void
217 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
218 {
219 if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
220 struct dp_packet *b;
221 struct pkt_list_node *node;
222
223 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
224 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
225
226 node = xmalloc(sizeof *node);
227 node->pkt = b;
228 ovs_list_push_back(&s->txq, &node->list_node);
229 }
230 }
231
232 static int
233 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
234 {
235 int error = 0;
236 size_t n;
237
238 stream_run(s->stream);
239
240 if (!ovs_list_is_empty(&s->txq)) {
241 struct pkt_list_node *txbuf_node;
242 struct dp_packet *txbuf;
243 int retval;
244
245 ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
246 txbuf = txbuf_node->pkt;
247 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
248
249 if (retval > 0) {
250 dp_packet_pull(txbuf, retval);
251 if (!dp_packet_size(txbuf)) {
252 ovs_list_remove(&txbuf_node->list_node);
253 free(txbuf_node);
254 dp_packet_delete(txbuf);
255 }
256 } else if (retval != -EAGAIN) {
257 error = -retval;
258 }
259 }
260
261 if (!error) {
262 if (dp_packet_size(&s->rxbuf) < 2) {
263 n = 2 - dp_packet_size(&s->rxbuf);
264 } else {
265 uint16_t frame_len;
266
267 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
268 if (frame_len < ETH_HEADER_LEN) {
269 error = EPROTO;
270 n = 0;
271 } else {
272 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
273 }
274 }
275 }
276 if (!error) {
277 int retval;
278
279 dp_packet_prealloc_tailroom(&s->rxbuf, n);
280 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
281
282 if (retval > 0) {
283 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
284 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
285 dp_packet_pull(&s->rxbuf, 2);
286 netdev_dummy_queue_packet(dev,
287 dp_packet_clone(&s->rxbuf), NULL, 0);
288 dp_packet_clear(&s->rxbuf);
289 }
290 } else if (retval != -EAGAIN) {
291 error = (retval < 0 ? -retval
292 : dp_packet_size(&s->rxbuf) ? EPROTO
293 : EOF);
294 }
295 }
296
297 return error;
298 }
299
300 static void
301 dummy_packet_stream_close(struct dummy_packet_stream *s)
302 {
303 stream_close(s->stream);
304 dp_packet_uninit(&s->rxbuf);
305 pkt_list_delete(&s->txq);
306 }
307
308 static void
309 dummy_packet_conn_init(struct dummy_packet_conn *conn)
310 {
311 memset(conn, 0, sizeof *conn);
312 conn->type = NONE;
313 }
314
315 static void
316 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
317 {
318
319 switch (conn->type) {
320 case PASSIVE:
321 smap_add(args, "pstream", pstream_get_name(conn->pconn.pstream));
322 break;
323
324 case ACTIVE:
325 smap_add(args, "stream", stream_get_name(conn->rconn.rstream->stream));
326 break;
327
328 case NONE:
329 default:
330 break;
331 }
332 }
333
334 static void
335 dummy_packet_conn_close(struct dummy_packet_conn *conn)
336 {
337 int i;
338 struct dummy_packet_pconn *pconn = &conn->pconn;
339 struct dummy_packet_rconn *rconn = &conn->rconn;
340
341 switch (conn->type) {
342 case PASSIVE:
343 pstream_close(pconn->pstream);
344 for (i = 0; i < pconn->n_streams; i++) {
345 dummy_packet_stream_close(pconn->streams[i]);
346 free(pconn->streams[i]);
347 }
348 free(pconn->streams);
349 pconn->pstream = NULL;
350 pconn->streams = NULL;
351 break;
352
353 case ACTIVE:
354 dummy_packet_stream_close(rconn->rstream);
355 free(rconn->rstream);
356 rconn->rstream = NULL;
357 reconnect_destroy(rconn->reconnect);
358 rconn->reconnect = NULL;
359 break;
360
361 case NONE:
362 default:
363 break;
364 }
365
366 conn->type = NONE;
367 memset(conn, 0, sizeof *conn);
368 }
369
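/* (Re)configures the device's packet side channel from 'args': a "pstream"
 * value makes the device listen for incoming connections, while a "stream"
 * value makes it connect actively, with retries driven by the 'reconnect'
 * state machine. An illustrative (not authoritative) setup for a dummy
 * port "p1":
 *
 *   ovs-vsctl set Interface p1 options:pstream=punix:/tmp/p1.sock
 */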
370 static void
371 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
372 const struct smap *args)
373 {
374 const char *pstream = smap_get(args, "pstream");
375 const char *stream = smap_get(args, "stream");
376
377 if (pstream && stream) {
378 VLOG_WARN("Open failed: both %s and %s are configured",
379 pstream, stream);
380 return;
381 }
382
383 switch (conn->type) {
384 case PASSIVE:
385 if (pstream &&
386 !strcmp(pstream_get_name(conn->pconn.pstream), pstream)) {
387 return;
388 }
389 dummy_packet_conn_close(conn);
390 break;
391 case ACTIVE:
392 if (stream &&
393 !strcmp(stream_get_name(conn->rconn.rstream->stream), stream)) {
394 return;
395 }
396 dummy_packet_conn_close(conn);
397 break;
398 case NONE:
399 default:
400 break;
401 }
402
403 if (pstream) {
404 int error;
405
406 error = pstream_open(pstream, &conn->pconn.pstream, DSCP_DEFAULT);
407 if (error) {
408 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
409 } else {
410 conn->type = PASSIVE;
411 }
412 }
413
414 if (stream) {
415 int error;
416 struct stream *active_stream;
417 struct reconnect *reconnect;
418
419 reconnect = reconnect_create(time_msec());
420 reconnect_set_name(reconnect, stream);
421 reconnect_set_passive(reconnect, false, time_msec());
422 reconnect_enable(reconnect, time_msec());
423 reconnect_set_backoff(reconnect, 100, INT_MAX);
424 reconnect_set_probe_interval(reconnect, 0);
425 conn->rconn.reconnect = reconnect;
426 conn->type = ACTIVE;
427
428 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
429 conn->rconn.rstream = dummy_packet_stream_create(active_stream);
430
431 switch (error) {
432 case 0:
433 reconnect_connected(reconnect, time_msec());
434 break;
435
436 case EAGAIN:
437 reconnect_connecting(reconnect, time_msec());
438 break;
439
440 default:
441 reconnect_connect_failed(reconnect, time_msec(), error);
442 stream_close(active_stream);
443 conn->rconn.rstream->stream = NULL;
444 break;
445 }
446 }
447 }
448
449 static void
450 dummy_pconn_run(struct netdev_dummy *dev)
451 OVS_REQUIRES(dev->mutex)
452 {
453 struct stream *new_stream;
454 struct dummy_packet_pconn *pconn = &dev->conn.pconn;
455 int error;
456 size_t i;
457
458 error = pstream_accept(pconn->pstream, &new_stream);
459 if (!error) {
460 struct dummy_packet_stream *s;
461
462 pconn->streams = xrealloc(pconn->streams,
463 ((pconn->n_streams + 1)
464 * sizeof s));
465 s = xmalloc(sizeof *s);
466 pconn->streams[pconn->n_streams++] = s;
467 dummy_packet_stream_init(s, new_stream);
468 } else if (error != EAGAIN) {
469 VLOG_WARN("%s: accept failed (%s)",
470 pstream_get_name(pconn->pstream), ovs_strerror(error));
471 pstream_close(pconn->pstream);
472 pconn->pstream = NULL;
473 dev->conn.type = NONE;
474 }
475
476 for (i = 0; i < pconn->n_streams; ) {
477 struct dummy_packet_stream *s = pconn->streams[i];
478
479 error = dummy_packet_stream_run(dev, s);
480 if (error) {
481 VLOG_DBG("%s: closing connection (%s)",
482 stream_get_name(s->stream),
483 ovs_retval_to_string(error));
484 dummy_packet_stream_close(s);
485 free(s);
486 pconn->streams[i] = pconn->streams[--pconn->n_streams];
487 } else {
488 i++;
489 }
490 }
491 }
492
493 static void
494 dummy_rconn_run(struct netdev_dummy *dev)
495 OVS_REQUIRES(dev->mutex)
496 {
497 struct dummy_packet_rconn *rconn = &dev->conn.rconn;
498
499 switch (reconnect_run(rconn->reconnect, time_msec())) {
500 case RECONNECT_CONNECT:
501 {
502 int error;
503
504 if (rconn->rstream->stream) {
505 error = stream_connect(rconn->rstream->stream);
506 } else {
507 error = stream_open(reconnect_get_name(rconn->reconnect),
508 &rconn->rstream->stream, DSCP_DEFAULT);
509 }
510
511 switch (error) {
512 case 0:
513 reconnect_connected(rconn->reconnect, time_msec());
514 break;
515
516 case EAGAIN:
517 reconnect_connecting(rconn->reconnect, time_msec());
518 break;
519
520 default:
521 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
522 stream_close(rconn->rstream->stream);
523 rconn->rstream->stream = NULL;
524 break;
525 }
526 }
527 break;
528
529 case RECONNECT_DISCONNECT:
530 case RECONNECT_PROBE:
531 default:
532 break;
533 }
534
535 if (reconnect_is_connected(rconn->reconnect)) {
536 int err;
537
538 err = dummy_packet_stream_run(dev, rconn->rstream);
539
540 if (err) {
541 reconnect_disconnected(rconn->reconnect, time_msec(), err);
542 stream_close(rconn->rstream->stream);
543 rconn->rstream->stream = NULL;
544 }
545 }
546 }
547
548 static void
549 dummy_packet_conn_run(struct netdev_dummy *dev)
550 OVS_REQUIRES(dev->mutex)
551 {
552 switch (dev->conn.type) {
553 case PASSIVE:
554 dummy_pconn_run(dev);
555 break;
556
557 case ACTIVE:
558 dummy_rconn_run(dev);
559 break;
560
561 case NONE:
562 default:
563 break;
564 }
565 }
566
567 static void
568 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
569 {
570 int i;
571 switch (conn->type) {
572 case PASSIVE:
573 pstream_wait(conn->pconn.pstream);
574 for (i = 0; i < conn->pconn.n_streams; i++) {
575 struct dummy_packet_stream *s = conn->pconn.streams[i];
576 dummy_packet_stream_wait(s);
577 }
578 break;
579 case ACTIVE:
580 if (reconnect_is_connected(conn->rconn.reconnect)) {
581 dummy_packet_stream_wait(conn->rconn.rstream);
582 }
583 break;
584
585 case NONE:
586 default:
587 break;
588 }
589 }
590
591 static void
592 dummy_packet_conn_send(struct dummy_packet_conn *conn,
593 const void *buffer, size_t size)
594 {
595 int i;
596
597 switch (conn->type) {
598 case PASSIVE:
599 for (i = 0; i < conn->pconn.n_streams; i++) {
600 struct dummy_packet_stream *s = conn->pconn.streams[i];
601
602 dummy_packet_stream_send(s, buffer, size);
603 pstream_wait(conn->pconn.pstream);
604 }
605 break;
606
607 case ACTIVE:
608 if (reconnect_is_connected(conn->rconn.reconnect)) {
609 dummy_packet_stream_send(conn->rconn.rstream, buffer, size);
610 dummy_packet_stream_wait(conn->rconn.rstream);
611 }
612 break;
613
614 case NONE:
615 default:
616 break;
617 }
618 }
619
620 static enum dummy_netdev_conn_state
621 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
622 {
623 enum dummy_netdev_conn_state state;
624
625 if (conn->type == ACTIVE) {
626 if (reconnect_is_connected(conn->rconn.reconnect)) {
627 state = CONN_STATE_CONNECTED;
628 } else {
629 state = CONN_STATE_NOT_CONNECTED;
630 }
631 } else {
632 state = CONN_STATE_UNKNOWN;
633 }
634
635 return state;
636 }
637
638 static void
639 netdev_dummy_run(const struct netdev_class *netdev_class)
640 {
641 struct netdev_dummy *dev;
642
643 ovs_mutex_lock(&dummy_list_mutex);
644 LIST_FOR_EACH (dev, list_node, &dummy_list) {
645 if (netdev_get_class(&dev->up) != netdev_class) {
646 continue;
647 }
648 ovs_mutex_lock(&dev->mutex);
649 dummy_packet_conn_run(dev);
650 ovs_mutex_unlock(&dev->mutex);
651 }
652 ovs_mutex_unlock(&dummy_list_mutex);
653 }
654
655 static void
656 netdev_dummy_wait(const struct netdev_class *netdev_class)
657 {
658 struct netdev_dummy *dev;
659
660 ovs_mutex_lock(&dummy_list_mutex);
661 LIST_FOR_EACH (dev, list_node, &dummy_list) {
662 if (netdev_get_class(&dev->up) != netdev_class) {
663 continue;
664 }
665 ovs_mutex_lock(&dev->mutex);
666 dummy_packet_conn_wait(&dev->conn);
667 ovs_mutex_unlock(&dev->mutex);
668 }
669 ovs_mutex_unlock(&dummy_list_mutex);
670 }
671
672 static struct netdev *
673 netdev_dummy_alloc(void)
674 {
675 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
676 return &netdev->up;
677 }
678
679 static int
680 netdev_dummy_construct(struct netdev *netdev_)
681 {
682 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
683 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
684 unsigned int n;
685
686 n = atomic_count_inc(&next_n);
687
688 ovs_mutex_init(&netdev->mutex);
689 ovs_mutex_lock(&netdev->mutex);
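/* Assign a locally unique MAC address: a fixed aa:55 prefix followed by
 * the low 32 bits of a per-process counter seeded with 0xaa550000. */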
690 netdev->hwaddr.ea[0] = 0xaa;
691 netdev->hwaddr.ea[1] = 0x55;
692 netdev->hwaddr.ea[2] = n >> 24;
693 netdev->hwaddr.ea[3] = n >> 16;
694 netdev->hwaddr.ea[4] = n >> 8;
695 netdev->hwaddr.ea[5] = n;
696 netdev->mtu = 1500;
697 netdev->flags = NETDEV_UP;
698 netdev->ifindex = -EOPNOTSUPP;
699 netdev->requested_n_rxq = netdev_->n_rxq;
700 netdev->requested_n_txq = netdev_->n_txq;
701 netdev->numa_id = 0;
702
703 memset(&netdev->custom_stats, 0, sizeof(netdev->custom_stats));
704
705 ovs_strlcpy(netdev->custom_stats[0].name,
706 "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE);
707 ovs_strlcpy(netdev->custom_stats[1].name,
708 "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE);
709
710 dummy_packet_conn_init(&netdev->conn);
711
712 ovs_list_init(&netdev->rxes);
713 hmap_init(&netdev->offloaded_flows);
714 ovs_mutex_unlock(&netdev->mutex);
715
716 ovs_mutex_lock(&dummy_list_mutex);
717 ovs_list_push_back(&dummy_list, &netdev->list_node);
718 ovs_mutex_unlock(&dummy_list_mutex);
719
720 return 0;
721 }
722
723 static void
724 netdev_dummy_destruct(struct netdev *netdev_)
725 {
726 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
727 struct offloaded_flow *off_flow;
728
729 ovs_mutex_lock(&dummy_list_mutex);
730 ovs_list_remove(&netdev->list_node);
731 ovs_mutex_unlock(&dummy_list_mutex);
732
733 ovs_mutex_lock(&netdev->mutex);
734 if (netdev->rxq_pcap) {
735 ovs_pcap_close(netdev->rxq_pcap);
736 }
737 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
738 ovs_pcap_close(netdev->tx_pcap);
739 }
740 dummy_packet_conn_close(&netdev->conn);
741 netdev->conn.type = NONE;
742
743 HMAP_FOR_EACH_POP (off_flow, node, &netdev->offloaded_flows) {
744 free(off_flow);
745 }
746 hmap_destroy(&netdev->offloaded_flows);
747
748 ovs_mutex_unlock(&netdev->mutex);
749 ovs_mutex_destroy(&netdev->mutex);
750 }
751
752 static void
753 netdev_dummy_dealloc(struct netdev *netdev_)
754 {
755 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
756
757 free(netdev);
758 }
759
760 static int
761 netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
762 {
763 struct netdev_dummy *netdev = netdev_dummy_cast(dev);
764
765 ovs_mutex_lock(&netdev->mutex);
766
767 if (netdev->ifindex >= 0) {
768 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
769 }
770
771 dummy_packet_conn_get_config(&netdev->conn, args);
772
773 /* 'dummy-pmd' specific config. */
774 if (!netdev_is_pmd(dev)) {
775 goto exit;
776 }
777 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
778 smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
779 smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
780 smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);
781
782 exit:
783 ovs_mutex_unlock(&netdev->mutex);
784 return 0;
785 }
786
787 static int
788 netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
789 struct in6_addr **pmask, int *n_addr)
790 {
791 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
792 int cnt = 0, i = 0, err = 0;
793 struct in6_addr *addr, *mask;
794
795 ovs_mutex_lock(&netdev->mutex);
796 if (netdev->address.s_addr != INADDR_ANY) {
797 cnt++;
798 }
799
800 if (ipv6_addr_is_set(&netdev->ipv6)) {
801 cnt++;
802 }
803 if (!cnt) {
804 err = EADDRNOTAVAIL;
805 goto out;
806 }
807 addr = xmalloc(sizeof *addr * cnt);
808 mask = xmalloc(sizeof *mask * cnt);
809 if (netdev->address.s_addr != INADDR_ANY) {
810 in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
811 in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
812 i++;
813 }
814
815 if (ipv6_addr_is_set(&netdev->ipv6)) {
816 memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
817 memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
818 i++;
819 }
820 if (paddr) {
821 *paddr = addr;
822 *pmask = mask;
823 *n_addr = cnt;
824 } else {
825 free(addr);
826 free(mask);
827 }
828 out:
829 ovs_mutex_unlock(&netdev->mutex);
830
831 return err;
832 }
833
834 static int
835 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
836 struct in_addr netmask)
837 {
838 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
839
840 ovs_mutex_lock(&netdev->mutex);
841 netdev->address = address;
842 netdev->netmask = netmask;
843 netdev_change_seq_changed(netdev_);
844 ovs_mutex_unlock(&netdev->mutex);
845
846 return 0;
847 }
848
849 static int
850 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
851 struct in6_addr *mask)
852 {
853 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
854
855 ovs_mutex_lock(&netdev->mutex);
856 netdev->ipv6 = *in6;
857 netdev->ipv6_mask = *mask;
858 netdev_change_seq_changed(netdev_);
859 ovs_mutex_unlock(&netdev->mutex);
860
861 return 0;
862 }
863
864 #define DUMMY_MAX_QUEUES_PER_PORT 1024
865
866 static int
867 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
868 char **errp OVS_UNUSED)
869 {
870 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
871 const char *pcap;
872 int new_n_rxq, new_n_txq, new_numa_id;
873
874 ovs_mutex_lock(&netdev->mutex);
875 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
876
877 dummy_packet_conn_set_config(&netdev->conn, args);
878
879 if (netdev->rxq_pcap) {
880 ovs_pcap_close(netdev->rxq_pcap);
881 }
882 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
883 ovs_pcap_close(netdev->tx_pcap);
884 }
885 netdev->rxq_pcap = netdev->tx_pcap = NULL;
886 pcap = smap_get(args, "pcap");
887 if (pcap) {
888 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
889 } else {
890 const char *rxq_pcap = smap_get(args, "rxq_pcap");
891 const char *tx_pcap = smap_get(args, "tx_pcap");
892
893 if (rxq_pcap) {
894 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
895 }
896 if (tx_pcap) {
897 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
898 }
899 }
900
901 netdev_change_seq_changed(netdev_);
902
903 /* 'dummy-pmd' specific config. */
904 if (!netdev_->netdev_class->is_pmd) {
905 goto exit;
906 }
907
908 new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
909 new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);
910
911 if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
912 new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
913 VLOG_WARN("The one or both of interface %s queues"
914 "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
915 netdev_get_name(netdev_),
916 new_n_rxq,
917 new_n_txq,
918 DUMMY_MAX_QUEUES_PER_PORT,
919 DUMMY_MAX_QUEUES_PER_PORT);
920
921 new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
922 new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
923 }
924
925 new_numa_id = smap_get_int(args, "numa_id", 0);
926 if (new_n_rxq != netdev->requested_n_rxq
927 || new_n_txq != netdev->requested_n_txq
928 || new_numa_id != netdev->requested_numa_id) {
929 netdev->requested_n_rxq = new_n_rxq;
930 netdev->requested_n_txq = new_n_txq;
931 netdev->requested_numa_id = new_numa_id;
932 netdev_request_reconfigure(netdev_);
933 }
934
935 exit:
936 ovs_mutex_unlock(&netdev->mutex);
937 return 0;
938 }
939
940 static int
941 netdev_dummy_get_numa_id(const struct netdev *netdev_)
942 {
943 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
944
945 ovs_mutex_lock(&netdev->mutex);
946 int numa_id = netdev->numa_id;
947 ovs_mutex_unlock(&netdev->mutex);
948
949 return numa_id;
950 }
951
952 /* Sets the number of tx queues and rx queues for the dummy PMD interface. */
953 static int
954 netdev_dummy_reconfigure(struct netdev *netdev_)
955 {
956 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
957
958 ovs_mutex_lock(&netdev->mutex);
959
960 netdev_->n_txq = netdev->requested_n_txq;
961 netdev_->n_rxq = netdev->requested_n_rxq;
962 netdev->numa_id = netdev->requested_numa_id;
963
964 ovs_mutex_unlock(&netdev->mutex);
965 return 0;
966 }
967
968 static struct netdev_rxq *
969 netdev_dummy_rxq_alloc(void)
970 {
971 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
972 return &rx->up;
973 }
974
975 static int
976 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
977 {
978 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
979 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
980
981 ovs_mutex_lock(&netdev->mutex);
982 ovs_list_push_back(&netdev->rxes, &rx->node);
983 ovs_list_init(&rx->recv_queue);
984 rx->recv_queue_len = 0;
985 rx->seq = seq_create();
986 ovs_mutex_unlock(&netdev->mutex);
987
988 return 0;
989 }
990
991 static void
992 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
993 {
994 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
995 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
996
997 ovs_mutex_lock(&netdev->mutex);
998 ovs_list_remove(&rx->node);
999 pkt_list_delete(&rx->recv_queue);
1000 ovs_mutex_unlock(&netdev->mutex);
1001 seq_destroy(rx->seq);
1002 }
1003
1004 static void
1005 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
1006 {
1007 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1008
1009 free(rx);
1010 }
1011
1012 static int
1013 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
1014 int *qfill)
1015 {
1016 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1017 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1018 struct dp_packet *packet;
1019
1020 ovs_mutex_lock(&netdev->mutex);
1021 if (!ovs_list_is_empty(&rx->recv_queue)) {
1022 struct pkt_list_node *pkt_node;
1023
1024 ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
1025 packet = pkt_node->pkt;
1026 free(pkt_node);
1027 rx->recv_queue_len--;
1028 } else {
1029 packet = NULL;
1030 }
1031 ovs_mutex_unlock(&netdev->mutex);
1032
1033 if (!packet) {
1034 if (netdev_is_pmd(&netdev->up)) {
1035 /* If 'netdev' is a PMD device, this is called as part of the PMD
1036 * thread busy loop. We yield here (without quiescing) for two
1037 * reasons:
1038 *
1039 * - To reduce the CPU utilization during the testsuite.
1040 * - To give valgrind a chance to switch threads. According
1041 * to the valgrind documentation, there's a big lock that
1042 * prevents multiple threads from executing at the same
1043 * time. On my system, without this sleep, the pmd threads
1044 * testcases fail under valgrind, because ovs-vswitchd becomes
1045 * unresponsive. */
1046 sched_yield();
1047 }
1048 return EAGAIN;
1049 }
1050 ovs_mutex_lock(&netdev->mutex);
1051 netdev->stats.rx_packets++;
1052 netdev->stats.rx_bytes += dp_packet_size(packet);
1053 netdev->custom_stats[0].value++;
1054 netdev->custom_stats[1].value++;
1055 ovs_mutex_unlock(&netdev->mutex);
1056
1057 dp_packet_batch_init_packet(batch, packet);
1058
1059 if (qfill) {
1060 *qfill = -ENOTSUP;
1061 }
1062
1063 return 0;
1064 }
1065
1066 static void
1067 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
1068 {
1069 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1070 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1071 uint64_t seq = seq_read(rx->seq);
1072
1073 ovs_mutex_lock(&netdev->mutex);
1074 if (!ovs_list_is_empty(&rx->recv_queue)) {
1075 poll_immediate_wake();
1076 } else {
1077 seq_wait(rx->seq, seq);
1078 }
1079 ovs_mutex_unlock(&netdev->mutex);
1080 }
1081
1082 static int
1083 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
1084 {
1085 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
1086 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
1087
1088 ovs_mutex_lock(&netdev->mutex);
1089 pkt_list_delete(&rx->recv_queue);
1090 rx->recv_queue_len = 0;
1091 ovs_mutex_unlock(&netdev->mutex);
1092
1093 seq_change(rx->seq);
1094
1095 return 0;
1096 }
1097
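/* Transmit path: each packet must be an Ethernet frame no larger than the
 * device MTU (plus a VLAN header allowance). Accepted packets update the tx
 * counters, are mirrored to the configured side-channel connection and tx
 * pcap file, and generate ARP replies for the device's own IPv4 address. */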
1098 static int
1099 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
1100 struct dp_packet_batch *batch,
1101 bool concurrent_txq OVS_UNUSED)
1102 {
1103 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1104 int error = 0;
1105
1106 struct dp_packet *packet;
1107 DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
1108 const void *buffer = dp_packet_data(packet);
1109 size_t size = dp_packet_size(packet);
1110
1111 if (!dp_packet_is_eth(packet)) {
1112 error = EPFNOSUPPORT;
1113 break;
1114 }
1115
1116 if (size < ETH_HEADER_LEN) {
1117 error = EMSGSIZE;
1118 break;
1119 } else {
1120 const struct eth_header *eth = buffer;
1121 int max_size;
1122
1123 ovs_mutex_lock(&dev->mutex);
1124 max_size = dev->mtu + ETH_HEADER_LEN;
1125 ovs_mutex_unlock(&dev->mutex);
1126
1127 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1128 max_size += VLAN_HEADER_LEN;
1129 }
1130 if (size > max_size) {
1131 error = EMSGSIZE;
1132 break;
1133 }
1134 }
1135
1136 ovs_mutex_lock(&dev->mutex);
1137 dev->stats.tx_packets++;
1138 dev->stats.tx_bytes += size;
1139
1140 dummy_packet_conn_send(&dev->conn, buffer, size);
1141
1142 /* Reply to ARP requests for 'dev''s assigned IP address. */
1143 if (dev->address.s_addr) {
1144 struct dp_packet dp;
1145 struct flow flow;
1146
1147 dp_packet_use_const(&dp, buffer, size);
1148 flow_extract(&dp, &flow);
1149 if (flow.dl_type == htons(ETH_TYPE_ARP)
1150 && flow.nw_proto == ARP_OP_REQUEST
1151 && flow.nw_dst == dev->address.s_addr) {
1152 struct dp_packet *reply = dp_packet_new(0);
1153 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
1154 false, flow.nw_dst, flow.nw_src);
1155 netdev_dummy_queue_packet(dev, reply, NULL, 0);
1156 }
1157 }
1158
1159 if (dev->tx_pcap) {
1160 struct dp_packet dp;
1161
1162 dp_packet_use_const(&dp, buffer, size);
1163 ovs_pcap_write(dev->tx_pcap, &dp);
1164 }
1165
1166 ovs_mutex_unlock(&dev->mutex);
1167 }
1168
1169 dp_packet_delete_batch(batch, true);
1170
1171 return error;
1172 }
1173
1174 static int
1175 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1176 {
1177 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1178
1179 ovs_mutex_lock(&dev->mutex);
1180 if (!eth_addr_equals(dev->hwaddr, mac)) {
1181 dev->hwaddr = mac;
1182 netdev_change_seq_changed(netdev);
1183 }
1184 ovs_mutex_unlock(&dev->mutex);
1185
1186 return 0;
1187 }
1188
1189 static int
1190 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1191 {
1192 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1193
1194 ovs_mutex_lock(&dev->mutex);
1195 *mac = dev->hwaddr;
1196 ovs_mutex_unlock(&dev->mutex);
1197
1198 return 0;
1199 }
1200
1201 static int
1202 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1203 {
1204 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1205
1206 ovs_mutex_lock(&dev->mutex);
1207 *mtup = dev->mtu;
1208 ovs_mutex_unlock(&dev->mutex);
1209
1210 return 0;
1211 }
1212
1213 #define DUMMY_MIN_MTU 68
1214 #define DUMMY_MAX_MTU 65535
1215
1216 static int
1217 netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
1218 {
1219 if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
1220 return EINVAL;
1221 }
1222
1223 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1224
1225 ovs_mutex_lock(&dev->mutex);
1226 if (dev->mtu != mtu) {
1227 dev->mtu = mtu;
1228 netdev_change_seq_changed(netdev);
1229 }
1230 ovs_mutex_unlock(&dev->mutex);
1231
1232 return 0;
1233 }
1234
1235 static int
1236 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1237 {
1238 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1239
1240 ovs_mutex_lock(&dev->mutex);
1241 /* Pass through only the counters this device actually collects. */
1242 stats->tx_packets = dev->stats.tx_packets;
1243 stats->tx_bytes = dev->stats.tx_bytes;
1244 stats->rx_packets = dev->stats.rx_packets;
1245 stats->rx_bytes = dev->stats.rx_bytes;
1246 ovs_mutex_unlock(&dev->mutex);
1247
1248 return 0;
1249 }
1250
1251 static int
1252 netdev_dummy_get_custom_stats(const struct netdev *netdev,
1253 struct netdev_custom_stats *custom_stats)
1254 {
1255 int i;
1256
1257 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1258
1259 custom_stats->size = C_STATS_SIZE;
1260 custom_stats->counters =
1261 (struct netdev_custom_counter *) xcalloc(C_STATS_SIZE,
1262 sizeof(struct netdev_custom_counter));
1263
1264 ovs_mutex_lock(&dev->mutex);
1265 for (i = 0 ; i < C_STATS_SIZE ; i++) {
1266 custom_stats->counters[i].value = dev->custom_stats[i].value;
1267 ovs_strlcpy(custom_stats->counters[i].name,
1268 dev->custom_stats[i].name,
1269 NETDEV_CUSTOM_STATS_NAME_SIZE);
1270 }
1271 ovs_mutex_unlock(&dev->mutex);
1272
1273 return 0;
1274 }
1275
1276 static int
1277 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1278 unsigned int queue_id, struct smap *details OVS_UNUSED)
1279 {
1280 if (queue_id == 0) {
1281 return 0;
1282 } else {
1283 return EINVAL;
1284 }
1285 }
1286
1287 static void
1288 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1289 {
1290 *stats = (struct netdev_queue_stats) {
1291 .tx_bytes = UINT64_MAX,
1292 .tx_packets = UINT64_MAX,
1293 .tx_errors = UINT64_MAX,
1294 .created = LLONG_MIN,
1295 };
1296 }
1297
1298 static int
1299 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1300 unsigned int queue_id,
1301 struct netdev_queue_stats *stats)
1302 {
1303 if (queue_id == 0) {
1304 netdev_dummy_init_queue_stats(stats);
1305 return 0;
1306 } else {
1307 return EINVAL;
1308 }
1309 }
1310
1311 struct netdev_dummy_queue_state {
1312 unsigned int next_queue;
1313 };
1314
1315 static int
1316 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1317 void **statep)
1318 {
1319 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1320 state->next_queue = 0;
1321 *statep = state;
1322 return 0;
1323 }
1324
1325 static int
1326 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1327 void *state_,
1328 unsigned int *queue_id,
1329 struct smap *details OVS_UNUSED)
1330 {
1331 struct netdev_dummy_queue_state *state = state_;
1332 if (state->next_queue == 0) {
1333 *queue_id = 0;
1334 state->next_queue++;
1335 return 0;
1336 } else {
1337 return EOF;
1338 }
1339 }
1340
1341 static int
1342 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1343 void *state)
1344 {
1345 free(state);
1346 return 0;
1347 }
1348
1349 static int
1350 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1351 void (*cb)(unsigned int queue_id,
1352 struct netdev_queue_stats *,
1353 void *aux),
1354 void *aux)
1355 {
1356 struct netdev_queue_stats stats;
1357 netdev_dummy_init_queue_stats(&stats);
1358 cb(0, &stats, aux);
1359 return 0;
1360 }
1361
1362 static int
1363 netdev_dummy_get_ifindex(const struct netdev *netdev)
1364 {
1365 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1366 int ifindex;
1367
1368 ovs_mutex_lock(&dev->mutex);
1369 ifindex = dev->ifindex;
1370 ovs_mutex_unlock(&dev->mutex);
1371
1372 return ifindex;
1373 }
1374
1375 static int
1376 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1377 enum netdev_flags off, enum netdev_flags on,
1378 enum netdev_flags *old_flagsp)
1379 OVS_REQUIRES(netdev->mutex)
1380 {
1381 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1382 return EINVAL;
1383 }
1384
1385 *old_flagsp = netdev->flags;
1386 netdev->flags |= on;
1387 netdev->flags &= ~off;
1388 if (*old_flagsp != netdev->flags) {
1389 netdev_change_seq_changed(&netdev->up);
1390 }
1391
1392 return 0;
1393 }
1394
1395 static int
1396 netdev_dummy_update_flags(struct netdev *netdev_,
1397 enum netdev_flags off, enum netdev_flags on,
1398 enum netdev_flags *old_flagsp)
1399 {
1400 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1401 int error;
1402
1403 ovs_mutex_lock(&netdev->mutex);
1404 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1405 ovs_mutex_unlock(&netdev->mutex);
1406
1407 return error;
1408 }
1409
1410 /* Flow offload API. */
1411 static uint32_t
1412 netdev_dummy_flow_hash(const ovs_u128 *ufid)
1413 {
1414 return ufid->u32[0];
1415 }
1416
1417 static struct offloaded_flow *
1418 find_offloaded_flow(const struct hmap *offloaded_flows, const ovs_u128 *ufid)
1419 {
1420 uint32_t hash = netdev_dummy_flow_hash(ufid);
1421 struct offloaded_flow *data;
1422
1423 HMAP_FOR_EACH_WITH_HASH (data, node, hash, offloaded_flows) {
1424 if (ovs_u128_equals(*ufid, data->ufid)) {
1425 return data;
1426 }
1427 }
1428
1429 return NULL;
1430 }
1431
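/* Emulated flow offload: flow_put() records the match and its flow mark in
 * the per-device 'offloaded_flows' table, and the receive path
 * (netdev_dummy_queue_packet()) stamps matching packets with that mark via
 * dp_packet_set_flow_mark(). No actions are executed by the dummy. */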
1432 static int
1433 netdev_dummy_flow_put(struct netdev *netdev, struct match *match,
1434 struct nlattr *actions OVS_UNUSED,
1435 size_t actions_len OVS_UNUSED,
1436 const ovs_u128 *ufid, struct offload_info *info,
1437 struct dpif_flow_stats *stats)
1438 {
1439 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1440 struct offloaded_flow *off_flow;
1441 bool modify = true;
1442
1443 ovs_mutex_lock(&dev->mutex);
1444
1445 off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
1446 if (!off_flow) {
1447 /* Create new offloaded flow. */
1448 off_flow = xzalloc(sizeof *off_flow);
1449 memcpy(&off_flow->ufid, ufid, sizeof *ufid);
1450 hmap_insert(&dev->offloaded_flows, &off_flow->node,
1451 netdev_dummy_flow_hash(ufid));
1452 modify = false;
1453 }
1454
1455 off_flow->mark = info->flow_mark;
1456 memcpy(&off_flow->match, match, sizeof *match);
1457
1458 /* As we have per-netdev 'offloaded_flows', we don't need to match
1459 * the 'in_port' for received packets. This will also allow offloading for
1460 * packets passed to the 'receive' command without specifying 'in_port'. */
1461 off_flow->match.wc.masks.in_port.odp_port = 0;
1462
1463 ovs_mutex_unlock(&dev->mutex);
1464
1465 if (VLOG_IS_DBG_ENABLED()) {
1466 struct ds ds = DS_EMPTY_INITIALIZER;
1467
1468 ds_put_format(&ds, "%s: flow put[%s]: ", netdev_get_name(netdev),
1469 modify ? "modify" : "create");
1470 odp_format_ufid(ufid, &ds);
1471 ds_put_cstr(&ds, " flow match: ");
1472 match_format(match, NULL, &ds, OFP_DEFAULT_PRIORITY);
1473 ds_put_format(&ds, ", mark: %"PRIu32, info->flow_mark);
1474
1475 VLOG_DBG("%s", ds_cstr(&ds));
1476 ds_destroy(&ds);
1477 }
1478
1479 if (stats) {
1480 memset(stats, 0, sizeof *stats);
1481 }
1482 return 0;
1483 }
1484
1485 static int
1486 netdev_dummy_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
1487 struct dpif_flow_stats *stats)
1488 {
1489 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1490 struct offloaded_flow *off_flow;
1491 const char *error = NULL;
1492 uint32_t mark;
1493
1494 ovs_mutex_lock(&dev->mutex);
1495
1496 off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
1497 if (!off_flow) {
1498 error = "No such flow.";
1499 goto exit;
1500 }
1501
1502 mark = off_flow->mark;
1503 hmap_remove(&dev->offloaded_flows, &off_flow->node);
1504 free(off_flow);
1505
1506 exit:
1507 ovs_mutex_unlock(&dev->mutex);
1508
1509 if (error || VLOG_IS_DBG_ENABLED()) {
1510 struct ds ds = DS_EMPTY_INITIALIZER;
1511
1512 ds_put_format(&ds, "%s: ", netdev_get_name(netdev));
1513 if (error) {
1514 ds_put_cstr(&ds, "failed to ");
1515 }
1516 ds_put_cstr(&ds, "flow del: ");
1517 odp_format_ufid(ufid, &ds);
1518 if (error) {
1519 ds_put_format(&ds, " error: %s", error);
1520 } else {
1521 ds_put_format(&ds, " mark: %"PRIu32, mark);
1522 }
1523 VLOG(error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1524 ds_destroy(&ds);
1525 }
1526
1527 if (stats) {
1528 memset(stats, 0, sizeof *stats);
1529 }
1530 return error ? -1 : 0;
1531 }
1532
1533 #define NETDEV_DUMMY_CLASS_COMMON \
1534 .run = netdev_dummy_run, \
1535 .wait = netdev_dummy_wait, \
1536 .alloc = netdev_dummy_alloc, \
1537 .construct = netdev_dummy_construct, \
1538 .destruct = netdev_dummy_destruct, \
1539 .dealloc = netdev_dummy_dealloc, \
1540 .get_config = netdev_dummy_get_config, \
1541 .set_config = netdev_dummy_set_config, \
1542 .get_numa_id = netdev_dummy_get_numa_id, \
1543 .send = netdev_dummy_send, \
1544 .set_etheraddr = netdev_dummy_set_etheraddr, \
1545 .get_etheraddr = netdev_dummy_get_etheraddr, \
1546 .get_mtu = netdev_dummy_get_mtu, \
1547 .set_mtu = netdev_dummy_set_mtu, \
1548 .get_ifindex = netdev_dummy_get_ifindex, \
1549 .get_stats = netdev_dummy_get_stats, \
1550 .get_custom_stats = netdev_dummy_get_custom_stats, \
1551 .get_queue = netdev_dummy_get_queue, \
1552 .get_queue_stats = netdev_dummy_get_queue_stats, \
1553 .queue_dump_start = netdev_dummy_queue_dump_start, \
1554 .queue_dump_next = netdev_dummy_queue_dump_next, \
1555 .queue_dump_done = netdev_dummy_queue_dump_done, \
1556 .dump_queue_stats = netdev_dummy_dump_queue_stats, \
1557 .get_addr_list = netdev_dummy_get_addr_list, \
1558 .update_flags = netdev_dummy_update_flags, \
1559 .rxq_alloc = netdev_dummy_rxq_alloc, \
1560 .rxq_construct = netdev_dummy_rxq_construct, \
1561 .rxq_destruct = netdev_dummy_rxq_destruct, \
1562 .rxq_dealloc = netdev_dummy_rxq_dealloc, \
1563 .rxq_recv = netdev_dummy_rxq_recv, \
1564 .rxq_wait = netdev_dummy_rxq_wait, \
1565 .rxq_drain = netdev_dummy_rxq_drain
1566
1567 static const struct netdev_class dummy_class = {
1568 NETDEV_DUMMY_CLASS_COMMON,
1569 .type = "dummy"
1570 };
1571
1572 static const struct netdev_class dummy_internal_class = {
1573 NETDEV_DUMMY_CLASS_COMMON,
1574 .type = "dummy-internal"
1575 };
1576
1577 static const struct netdev_class dummy_pmd_class = {
1578 NETDEV_DUMMY_CLASS_COMMON,
1579 .type = "dummy-pmd",
1580 .is_pmd = true,
1581 .reconfigure = netdev_dummy_reconfigure
1582 };
1583
1584 static int
1585 netdev_dummy_offloads_init_flow_api(struct netdev *netdev)
1586 {
1587 return is_dummy_class(netdev->netdev_class) ? 0 : EOPNOTSUPP;
1588 }
1589
1590 static const struct netdev_flow_api netdev_offload_dummy = {
1591 .type = "dummy",
1592 .flow_put = netdev_dummy_flow_put,
1593 .flow_del = netdev_dummy_flow_del,
1594 .init_flow_api = netdev_dummy_offloads_init_flow_api,
1595 };
1596
1597 \f
1598 /* Helper functions. */
1599
1600 static void
1601 pkt_list_delete(struct ovs_list *l)
1602 {
1603 struct pkt_list_node *pkt;
1604
1605 LIST_FOR_EACH_POP(pkt, list_node, l) {
1606 dp_packet_delete(pkt->pkt);
1607 free(pkt);
1608 }
1609 }
1610
1611 static struct dp_packet *
1612 eth_from_packet(const char *s)
1613 {
1614 struct dp_packet *packet;
1615 eth_from_hex(s, &packet);
1616 return packet;
1617 }
1618
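/* Composes a packet from the ODP flow string 's'. With a nonzero
 * 'packet_size', the minimal packet for 'flow' is composed and then padded
 * out to exactly 'packet_size' bytes; if the minimal packet is already
 * larger than that, NULL is returned. With a zero 'packet_size', a default
 * 64-byte packet is composed. */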
1619 static struct dp_packet *
1620 eth_from_flow_str(const char *s, size_t packet_size,
1621 struct flow *flow, char **errorp)
1622 {
1623 *errorp = NULL;
1624
1625 enum odp_key_fitness fitness;
1626 struct dp_packet *packet;
1627 struct ofpbuf odp_key;
1628 int error;
1629
1630 /* Convert string to datapath key.
1631 *
1632 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1633 * the code for that currently calls exit() on parse error. We have to
1634 * settle for parsing a datapath key for now.
1635 */
1636 ofpbuf_init(&odp_key, 0);
1637 error = odp_flow_from_string(s, NULL, &odp_key, NULL, errorp);
1638 if (error) {
1639 ofpbuf_uninit(&odp_key);
1640 return NULL;
1641 }
1642
1643 /* Convert odp_key to flow. */
1644 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, flow, errorp);
1645 if (fitness == ODP_FIT_ERROR) {
1646 ofpbuf_uninit(&odp_key);
1647 return NULL;
1648 }
1649
1650 packet = dp_packet_new(0);
1651 if (packet_size) {
1652 flow_compose(packet, flow, NULL, 0);
1653 if (dp_packet_size(packet) < packet_size) {
1654 packet_expand(packet, flow, packet_size);
1655 } else if (dp_packet_size(packet) > packet_size) {
1656 dp_packet_delete(packet);
1657 packet = NULL;
1658 }
1659 } else {
1660 flow_compose(packet, flow, NULL, 64);
1661 }
1662
1663 ofpbuf_uninit(&odp_key);
1664 return packet;
1665 }
1666
1667 static void
1668 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1669 {
1670 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1671
1672 pkt_node->pkt = packet;
1673 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1674 rx->recv_queue_len++;
1675 seq_change(rx->seq);
1676 }
1677
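/* Delivers 'packet' to every rx queue of 'dummy' whose queue id matches and
 * that still has room. The packet is cloned for all matching queues except
 * the last one, which consumes the original; if no queue qualifies, the
 * packet is simply dropped. Offloaded-flow marks are applied first. */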
1678 static void
1679 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
1680 struct flow *flow, int queue_id)
1681 OVS_REQUIRES(dummy->mutex)
1682 {
1683 struct netdev_rxq_dummy *rx, *prev;
1684 struct offloaded_flow *data;
1685 struct flow packet_flow;
1686
1687 if (dummy->rxq_pcap) {
1688 ovs_pcap_write(dummy->rxq_pcap, packet);
1689 }
1690
1691 if (!flow) {
1692 flow = &packet_flow;
1693 flow_extract(packet, flow);
1694 }
1695 HMAP_FOR_EACH (data, node, &dummy->offloaded_flows) {
1696 if (flow_equal_except(flow, &data->match.flow, &data->match.wc)) {
1697
1698 dp_packet_set_flow_mark(packet, data->mark);
1699
1700 if (VLOG_IS_DBG_ENABLED()) {
1701 struct ds ds = DS_EMPTY_INITIALIZER;
1702
1703 ds_put_format(&ds, "%s: packet: ",
1704 netdev_get_name(&dummy->up));
1705 /* 'flow' does not contain a proper port number here.
1706 * Let's just clear it, as it is wildcarded anyway. */
1707 flow->in_port.ofp_port = 0;
1708 flow_format(&ds, flow, NULL);
1709
1710 ds_put_cstr(&ds, " matches with flow: ");
1711 odp_format_ufid(&data->ufid, &ds);
1712 ds_put_cstr(&ds, " ");
1713 match_format(&data->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
1714 ds_put_format(&ds, " with mark: %"PRIu32, data->mark);
1715
1716 VLOG_DBG("%s", ds_cstr(&ds));
1717 ds_destroy(&ds);
1718 }
1719 break;
1720 }
1721 }
1722
1723 prev = NULL;
1724 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1725 if (rx->up.queue_id == queue_id &&
1726 rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1727 if (prev) {
1728 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1729 }
1730 prev = rx;
1731 }
1732 }
1733 if (prev) {
1734 netdev_dummy_queue_packet__(prev, packet);
1735 } else {
1736 dp_packet_delete(packet);
1737 }
1738 }
1739
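/* Implements the "netdev-dummy/receive" unixctl command. Each packet
 * argument is either a raw hex dump of an Ethernet frame or an ODP flow
 * string, the latter optionally followed by "--len". Illustrative
 * invocations, assuming a dummy port named "p1":
 *
 *   ovs-appctl netdev-dummy/receive p1 <hex-frame>
 *   ovs-appctl netdev-dummy/receive p1 --qid 1 <odp-flow> --len 128
 */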
1740 static void
1741 netdev_dummy_receive(struct unixctl_conn *conn,
1742 int argc, const char *argv[], void *aux OVS_UNUSED)
1743 {
1744 struct netdev_dummy *dummy_dev;
1745 struct netdev *netdev;
1746 int i, k = 1, rx_qid = 0;
1747
1748 netdev = netdev_from_name(argv[k++]);
1749 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1750 unixctl_command_reply_error(conn, "no such dummy netdev");
1751 goto exit_netdev;
1752 }
1753 dummy_dev = netdev_dummy_cast(netdev);
1754
1755 ovs_mutex_lock(&dummy_dev->mutex);
1756
1757 if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
1758 rx_qid = strtol(argv[k + 1], NULL, 10);
1759 if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
1760 unixctl_command_reply_error(conn, "bad rx queue id.");
1761 goto exit;
1762 }
1763 k += 2;
1764 }
1765
1766 for (i = k; i < argc; i++) {
1767 struct dp_packet *packet;
1768 struct flow flow;
1769
1770 /* Try to parse 'argv[i]' as a packet in hex. */
1771 packet = eth_from_packet(argv[i]);
1772
1773 if (!packet) {
1774 int packet_size = 0;
1775 const char *flow_str = argv[i];
1776
1777 /* Parse the optional --len argument that immediately follows a 'flow'. */
1778 if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
1779 packet_size = strtol(argv[i + 2], NULL, 10);
1780
1781 if (packet_size < ETH_TOTAL_MIN) {
1782 unixctl_command_reply_error(conn, "too small packet len");
1783 goto exit;
1784 }
1785 i += 2;
1786 }
1787 /* Try to parse 'argv[i]' as an ODP flow. */
1788 char *error_s;
1789 packet = eth_from_flow_str(flow_str, packet_size, &flow, &error_s);
1790 if (!packet) {
1791 unixctl_command_reply_error(conn, error_s);
1792 free(error_s);
1793 goto exit;
1794 }
1795 } else {
1796 flow_extract(packet, &flow);
1797 }
1798
1799 netdev_dummy_queue_packet(dummy_dev, packet, &flow, rx_qid);
1800 }
1801
1802 unixctl_command_reply(conn, NULL);
1803
1804 exit:
1805 ovs_mutex_unlock(&dummy_dev->mutex);
1806 exit_netdev:
1807 netdev_close(netdev);
1808 }
1809
1810 static void
1811 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1812 OVS_REQUIRES(dev->mutex)
1813 {
1814 enum netdev_flags old_flags;
1815
1816 if (admin_state) {
1817 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1818 } else {
1819 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1820 }
1821 }
1822
1823 static void
1824 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1825 const char *argv[], void *aux OVS_UNUSED)
1826 {
1827 bool up;
1828
1829 if (!strcasecmp(argv[argc - 1], "up")) {
1830 up = true;
1831 } else if (!strcasecmp(argv[argc - 1], "down")) {
1832 up = false;
1833 } else {
1834 unixctl_command_reply_error(conn, "Invalid Admin State");
1835 return;
1836 }
1837
1838 if (argc > 2) {
1839 struct netdev *netdev = netdev_from_name(argv[1]);
1840 if (netdev && is_dummy_class(netdev->netdev_class)) {
1841 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1842
1843 ovs_mutex_lock(&dummy_dev->mutex);
1844 netdev_dummy_set_admin_state__(dummy_dev, up);
1845 ovs_mutex_unlock(&dummy_dev->mutex);
1846
1847 netdev_close(netdev);
1848 } else {
1849 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1850 netdev_close(netdev);
1851 return;
1852 }
1853 } else {
1854 struct netdev_dummy *netdev;
1855
1856 ovs_mutex_lock(&dummy_list_mutex);
1857 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1858 ovs_mutex_lock(&netdev->mutex);
1859 netdev_dummy_set_admin_state__(netdev, up);
1860 ovs_mutex_unlock(&netdev->mutex);
1861 }
1862 ovs_mutex_unlock(&dummy_list_mutex);
1863 }
1864 unixctl_command_reply(conn, "OK");
1865 }
1866
1867 static void
1868 display_conn_state__(struct ds *s, const char *name,
1869 enum dummy_netdev_conn_state state)
1870 {
1871 ds_put_format(s, "%s: ", name);
1872
1873 switch (state) {
1874 case CONN_STATE_CONNECTED:
1875 ds_put_cstr(s, "connected\n");
1876 break;
1877
1878 case CONN_STATE_NOT_CONNECTED:
1879 ds_put_cstr(s, "disconnected\n");
1880 break;
1881
1882 case CONN_STATE_UNKNOWN:
1883 default:
1884 ds_put_cstr(s, "unknown\n");
1885 break;
1886 };
1887 }
1888
1889 static void
1890 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1891 const char *argv[], void *aux OVS_UNUSED)
1892 {
1893 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1894 struct ds s;
1895
1896 ds_init(&s);
1897
1898 if (argc > 1) {
1899 const char *dev_name = argv[1];
1900 struct netdev *netdev = netdev_from_name(dev_name);
1901
1902 if (netdev && is_dummy_class(netdev->netdev_class)) {
1903 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1904
1905 ovs_mutex_lock(&dummy_dev->mutex);
1906 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1907 ovs_mutex_unlock(&dummy_dev->mutex);
1908
1909 netdev_close(netdev);
1910 }
1911 display_conn_state__(&s, dev_name, state);
1912 } else {
1913 struct netdev_dummy *netdev;
1914
1915 ovs_mutex_lock(&dummy_list_mutex);
1916 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1917 ovs_mutex_lock(&netdev->mutex);
1918 state = dummy_netdev_get_conn_state(&netdev->conn);
1919 ovs_mutex_unlock(&netdev->mutex);
1920 if (state != CONN_STATE_UNKNOWN) {
1921 display_conn_state__(&s, netdev->up.name, state);
1922 }
1923 }
1924 ovs_mutex_unlock(&dummy_list_mutex);
1925 }
1926
1927 unixctl_command_reply(conn, ds_cstr(&s));
1928 ds_destroy(&s);
1929 }
1930
1931 static void
1932 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1933 const char *argv[], void *aux OVS_UNUSED)
1934 {
1935 struct netdev *netdev = netdev_from_name(argv[1]);
1936
1937 if (netdev && is_dummy_class(netdev->netdev_class)) {
1938 struct in_addr ip, mask;
1939 char *error;
1940
1941 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1942 if (!error) {
1943 netdev_dummy_set_in4(netdev, ip, mask);
1944 unixctl_command_reply(conn, "OK");
1945 } else {
1946 unixctl_command_reply_error(conn, error);
1947 free(error);
1948 }
1949 } else {
1950 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1951 }
1952
1953 netdev_close(netdev);
1954 }
1955
1956 static void
1957 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1958 const char *argv[], void *aux OVS_UNUSED)
1959 {
1960 struct netdev *netdev = netdev_from_name(argv[1]);
1961
1962 if (netdev && is_dummy_class(netdev->netdev_class)) {
1963 struct in6_addr ip6;
1964 char *error;
1965 uint32_t plen;
1966
1967 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1968 if (!error) {
1969 struct in6_addr mask;
1970
1971 mask = ipv6_create_mask(plen);
1972 netdev_dummy_set_in6(netdev, &ip6, &mask);
1973 unixctl_command_reply(conn, "OK");
1974 } else {
1975 unixctl_command_reply_error(conn, error);
1976 free(error);
1977 }
1978 } else {
1979 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1980 }
1981
1982 netdev_close(netdev);
1983 }
1984
1985
1986 static void
1987 netdev_dummy_override(const char *type)
1988 {
1989 if (!netdev_unregister_provider(type)) {
1990 struct netdev_class *class;
1991 int error;
1992
1993 class = xmemdup(&dummy_class, sizeof dummy_class);
1994 class->type = xstrdup(type);
1995 error = netdev_register_provider(class);
1996 if (error) {
1997 VLOG_ERR("%s: failed to register netdev provider (%s)",
1998 type, ovs_strerror(error));
1999 free(CONST_CAST(char *, class->type));
2000 free(class);
2001 }
2002 }
2003 }
2004
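/* Registers the dummy netdev classes, their unixctl commands, and the dummy
 * flow offload provider. DUMMY_OVERRIDE_ALL replaces every registered
 * netdev type except "patch" with a dummy implementation;
 * DUMMY_OVERRIDE_SYSTEM replaces only the "system" type. */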
2005 void
2006 netdev_dummy_register(enum dummy_level level)
2007 {
2008 unixctl_command_register("netdev-dummy/receive",
2009 "name [--qid queue_id] packet|flow [--len packet_len]",
2010 2, INT_MAX, netdev_dummy_receive, NULL);
2011 unixctl_command_register("netdev-dummy/set-admin-state",
2012 "[netdev] up|down", 1, 2,
2013 netdev_dummy_set_admin_state, NULL);
2014 unixctl_command_register("netdev-dummy/conn-state",
2015 "[netdev]", 0, 1,
2016 netdev_dummy_conn_state, NULL);
2017 unixctl_command_register("netdev-dummy/ip4addr",
2018 "[netdev] ipaddr/mask-prefix-len", 2, 2,
2019 netdev_dummy_ip4addr, NULL);
2020 unixctl_command_register("netdev-dummy/ip6addr",
2021 "[netdev] ip6addr", 2, 2,
2022 netdev_dummy_ip6addr, NULL);
2023
2024 if (level == DUMMY_OVERRIDE_ALL) {
2025 struct sset types;
2026 const char *type;
2027
2028 sset_init(&types);
2029 netdev_enumerate_types(&types);
2030 SSET_FOR_EACH (type, &types) {
2031 if (strcmp(type, "patch")) {
2032 netdev_dummy_override(type);
2033 }
2034 }
2035 sset_destroy(&types);
2036 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
2037 netdev_dummy_override("system");
2038 }
2039 netdev_register_provider(&dummy_class);
2040 netdev_register_provider(&dummy_internal_class);
2041 netdev_register_provider(&dummy_pmd_class);
2042
2043 netdev_register_flow_api_provider(&netdev_offload_dummy);
2044
2045 netdev_vport_tunnel_register();
2046 }