/* lib/netdev-dummy.c */
/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "dummy.h"

#include <errno.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "flow.h"
#include "netdev-offload-provider.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "reconnect.h"

VLOG_DEFINE_THIS_MODULE(netdev_dummy);

#define C_STATS_SIZE 2

struct reconnect;

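/* A dummy packet stream carries Ethernet frames over an OVS stream
 * connection (the "stream" or "pstream" device options).  Each frame on the
 * wire is preceded by a 2-byte length field in network byte order:
 * dummy_packet_stream_send() prepends the length, and
 * dummy_packet_stream_run() first reads those two bytes and then the frame
 * itself into 'rxbuf'. */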
struct dummy_packet_stream {
    struct stream *stream;
    struct ovs_list txq;
    struct dp_packet rxbuf;
};

enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};

enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,      /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,  /* Listener not connected. */
    CONN_STATE_UNKNOWN,        /* No relevant information. */
};

struct dummy_packet_pconn {
    struct pstream *pstream;
    struct dummy_packet_stream **streams;
    size_t n_streams;
};

struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};

struct dummy_packet_conn {
    enum dummy_packet_conn_type type;
    union {
        struct dummy_packet_pconn pconn;
        struct dummy_packet_rconn rconn;
    };
};

struct pkt_list_node {
    struct dp_packet *pkt;
    struct ovs_list list_node;
};

struct offloaded_flow {
    struct hmap_node node;
    ovs_u128 ufid;
    struct match match;
    uint32_t mark;
};

/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dummy's. */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);

struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    struct eth_addr hwaddr OVS_GUARDED;
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    struct netdev_custom_counter custom_stats[C_STATS_SIZE] OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;
    int numa_id OVS_GUARDED;

    struct dummy_packet_conn conn OVS_GUARDED;

    struct pcap_file *tx_pcap, *rxq_pcap OVS_GUARDED;

    struct in_addr address, netmask;
    struct in6_addr ipv6, ipv6_mask;
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */

    struct hmap offloaded_flows OVS_GUARDED;

    /* The following properties are for dummy-pmd and they cannot be changed
     * when a device is running, so we remember the request and update them
     * next time netdev_dummy_reconfigure() is called. */
    int requested_n_txq OVS_GUARDED;
    int requested_n_rxq OVS_GUARDED;
    int requested_numa_id OVS_GUARDED;
};

/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

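/* A dummy rx queue never receives traffic from a real network.  Packets are
 * queued onto 'recv_queue' by netdev_dummy_queue_packet(), which is fed by
 * the "netdev-dummy/receive" unixctl command, by frames arriving on a
 * configured packet-stream connection, and by ARP replies generated locally
 * in netdev_dummy_send(). */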
struct netdev_rxq_dummy {
    struct netdev_rxq up;
    struct ovs_list node;       /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue;
    int recv_queue_len;         /* ovs_list_size(&recv_queue). */
    struct seq *seq;            /* Reports newly queued packets. */
};

static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
                                      struct dp_packet *, struct flow *, int);

static void dummy_packet_stream_close(struct dummy_packet_stream *);

static void pkt_list_delete(struct ovs_list *);

static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}

static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}

static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}

static void
dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
{
    int rxbuf_size = stream ? 2048 : 0;
    s->stream = stream;
    dp_packet_init(&s->rxbuf, rxbuf_size);
    ovs_list_init(&s->txq);
}

static struct dummy_packet_stream *
dummy_packet_stream_create(struct stream *stream)
{
    struct dummy_packet_stream *s;

    s = xzalloc(sizeof *s);
    dummy_packet_stream_init(s, stream);

    return s;
}

static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}

static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}

static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), NULL, 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}

static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}

static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}

static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{

    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
    int i;
    struct dummy_packet_pconn *pconn = &conn->pconn;
    struct dummy_packet_rconn *rconn = &conn->rconn;

    switch (conn->type) {
    case PASSIVE:
        pstream_close(pconn->pstream);
        for (i = 0; i < pconn->n_streams; i++) {
            dummy_packet_stream_close(pconn->streams[i]);
            free(pconn->streams[i]);
        }
        free(pconn->streams);
        pconn->pstream = NULL;
        pconn->streams = NULL;
        break;

    case ACTIVE:
        dummy_packet_stream_close(rconn->rstream);
        free(rconn->rstream);
        rconn->rstream = NULL;
        reconnect_destroy(rconn->reconnect);
        rconn->reconnect = NULL;
        break;

    case NONE:
    default:
        break;
    }

    conn->type = NONE;
    memset(conn, 0, sizeof *conn);
}

static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->pconn.pstream), pstream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->rconn.rstream->stream), stream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->rconn.rstream->stream = NULL;
            break;
        }
    }
}

static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof s));
        s = xmalloc(sizeof *s);
        pconn->streams[pconn->n_streams++] = s;
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; ) {
        struct dummy_packet_stream *s = pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            free(s);
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        } else {
            i++;
        }
    }
}

static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}

static void
dummy_packet_conn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    switch (dev->conn.type) {
    case PASSIVE:
        dummy_pconn_run(dev);
        break;

    case ACTIVE:
        dummy_rconn_run(dev);
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;
    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->pconn.pstream);
        for (i = 0; i < conn->pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;
    case ACTIVE:
        if (reconnect_is_connected(conn->rconn.reconnect)) {
            dummy_packet_stream_wait(conn->rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            pstream_wait(conn->pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->rconn.reconnect)) {
            dummy_packet_stream_send(conn->rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static enum dummy_netdev_conn_state
dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
{
    enum dummy_netdev_conn_state state;

    if (conn->type == ACTIVE) {
        if (reconnect_is_connected(conn->rconn.reconnect)) {
            state = CONN_STATE_CONNECTED;
        } else {
            state = CONN_STATE_NOT_CONNECTED;
        }
    } else {
        state = CONN_STATE_UNKNOWN;
    }

    return state;
}

static void
netdev_dummy_run(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static void
netdev_dummy_wait(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}

static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = NETDEV_UP;
    netdev->ifindex = -EOPNOTSUPP;
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;
    netdev->numa_id = 0;

    memset(&netdev->custom_stats, 0, sizeof(netdev->custom_stats));

    ovs_strlcpy(netdev->custom_stats[0].name,
                "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE);
    ovs_strlcpy(netdev->custom_stats[1].name,
                "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE);

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    hmap_init(&netdev->offloaded_flows);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}

static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    struct offloaded_flow *off_flow;

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->rxq_pcap) {
        ovs_pcap_close(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        ovs_pcap_close(netdev->tx_pcap);
    }
    dummy_packet_conn_close(&netdev->conn);
    netdev->conn.type = NONE;

    HMAP_FOR_EACH_POP (off_flow, node, &netdev->offloaded_flows) {
        free(off_flow);
    }
    hmap_destroy(&netdev->offloaded_flows);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}

static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}

static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}

static int
netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
                     struct in_addr netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->address = address;
    netdev->netmask = netmask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static int
netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
                     struct in6_addr *mask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->ipv6 = *in6;
    netdev->ipv6_mask = *mask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

#define DUMMY_MAX_QUEUES_PER_PORT 1024

static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
                        char **errp OVS_UNUSED)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;
    int new_n_rxq, new_n_txq, new_numa_id;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    if (netdev->rxq_pcap) {
        ovs_pcap_close(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        ovs_pcap_close(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    netdev_change_seq_changed(netdev_);

    /* 'dummy-pmd' specific config. */
    if (!netdev_->netdev_class->is_pmd) {
        goto exit;
    }

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);

    if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
        new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
        VLOG_WARN("One or both of the requested queue counts for interface %s "
                  "(rxq: %d, txq: %d) exceed the maximum of %d; "
                  "lowering them to %d.",
                  netdev_get_name(netdev_),
                  new_n_rxq,
                  new_n_txq,
                  DUMMY_MAX_QUEUES_PER_PORT,
                  DUMMY_MAX_QUEUES_PER_PORT);

        new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
        new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
    }

    new_numa_id = smap_get_int(args, "numa_id", 0);
    if (new_n_rxq != netdev->requested_n_rxq
        || new_n_txq != netdev->requested_n_txq
        || new_numa_id != netdev->requested_numa_id) {
        netdev->requested_n_rxq = new_n_rxq;
        netdev->requested_n_txq = new_n_txq;
        netdev->requested_numa_id = new_numa_id;
        netdev_request_reconfigure(netdev_);
    }

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static int
netdev_dummy_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    int numa_id = netdev->numa_id;
    ovs_mutex_unlock(&netdev->mutex);

    return numa_id;
}

/* Sets the number of tx queues and rx queues for the dummy PMD interface. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;
    netdev->numa_id = netdev->requested_numa_id;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static struct netdev_rxq *
netdev_dummy_rxq_alloc(void)
{
    struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}

static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}

static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}

static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
                      int *qfill)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite.
             * - To give valgrind a chance to switch threads.  According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple threads from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    netdev->custom_stats[0].value++;
    netdev->custom_stats[1].value++;
    ovs_mutex_unlock(&netdev->mutex);

    dp_packet_batch_init_packet(batch, packet);

    if (qfill) {
        *qfill = -ENOTSUP;
    }

    return 0;
}

static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}

static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}

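/* "Transmitting" on a dummy netdev only updates statistics, optionally
 * records the frame to the configured tx pcap file, forwards it to any
 * connected packet stream, and answers ARP requests that target the
 * device's configured IPv4 address; nothing is ever put on a real wire. */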
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet_batch *batch,
                  bool concurrent_txq OVS_UNUSED)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;

    struct dp_packet *packet;
    DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
        const void *buffer = dp_packet_data(packet);
        size_t size = dp_packet_size(packet);

        if (!dp_packet_is_eth(packet)) {
            error = EPFNOSUPPORT;
            break;
        }

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet dp;
            struct flow flow;

            dp_packet_use_const(&dp, buffer, size);
            flow_extract(&dp, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, NULL, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet dp;

            dp_packet_use_const(&dp, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &dp);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    dp_packet_delete_batch(batch, true);

    return error;
}

static int
netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

#define DUMMY_MIN_MTU 68
#define DUMMY_MAX_MTU 65535

static int
netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
{
    if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
        return EINVAL;
    }

    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu != mtu) {
        dev->mtu = mtu;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Passing only collected counters. */
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_packets = dev->stats.rx_packets;
    stats->rx_bytes = dev->stats.rx_bytes;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_custom_stats(const struct netdev *netdev,
                              struct netdev_custom_stats *custom_stats)
{
    int i;

    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    custom_stats->size = 2;
    custom_stats->counters =
        (struct netdev_custom_counter *) xcalloc(C_STATS_SIZE,
                sizeof(struct netdev_custom_counter));

    ovs_mutex_lock(&dev->mutex);
    for (i = 0; i < C_STATS_SIZE; i++) {
        custom_stats->counters[i].value = dev->custom_stats[i].value;
        ovs_strlcpy(custom_stats->counters[i].name,
                    dev->custom_stats[i].name,
                    NETDEV_CUSTOM_STATS_NAME_SIZE);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
                       unsigned int queue_id, struct smap *details OVS_UNUSED)
{
    if (queue_id == 0) {
        return 0;
    } else {
        return EINVAL;
    }
}

static void
netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
{
    *stats = (struct netdev_queue_stats) {
        .tx_bytes = UINT64_MAX,
        .tx_packets = UINT64_MAX,
        .tx_errors = UINT64_MAX,
        .created = LLONG_MIN,
    };
}

static int
netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
                             unsigned int queue_id,
                             struct netdev_queue_stats *stats)
{
    if (queue_id == 0) {
        netdev_dummy_init_queue_stats(stats);
        return 0;
    } else {
        return EINVAL;
    }
}

struct netdev_dummy_queue_state {
    unsigned int next_queue;
};

static int
netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
                              void **statep)
{
    struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
    state->next_queue = 0;
    *statep = state;
    return 0;
}

static int
netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
                             void *state_,
                             unsigned int *queue_id,
                             struct smap *details OVS_UNUSED)
{
    struct netdev_dummy_queue_state *state = state_;
    if (state->next_queue == 0) {
        *queue_id = 0;
        state->next_queue++;
        return 0;
    } else {
        return EOF;
    }
}

static int
netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                             void *state)
{
    free(state);
    return 0;
}

static int
netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
                              void (*cb)(unsigned int queue_id,
                                         struct netdev_queue_stats *,
                                         void *aux),
                              void *aux)
{
    struct netdev_queue_stats stats;
    netdev_dummy_init_queue_stats(&stats);
    cb(0, &stats, aux);
    return 0;
}

static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->ifindex;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dummy_update_flags__(struct netdev_dummy *netdev,
                            enum netdev_flags off, enum netdev_flags on,
                            enum netdev_flags *old_flagsp)
    OVS_REQUIRES(netdev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = netdev->flags;
    netdev->flags |= on;
    netdev->flags &= ~off;
    if (*old_flagsp != netdev->flags) {
        netdev_change_seq_changed(&netdev->up);
    }

    return 0;
}

static int
netdev_dummy_update_flags(struct netdev *netdev_,
                          enum netdev_flags off, enum netdev_flags on,
                          enum netdev_flags *old_flagsp)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

/* Flow offload API. */
static uint32_t
netdev_dummy_flow_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}

static struct offloaded_flow *
find_offloaded_flow(const struct hmap *offloaded_flows, const ovs_u128 *ufid)
{
    uint32_t hash = netdev_dummy_flow_hash(ufid);
    struct offloaded_flow *data;

    HMAP_FOR_EACH_WITH_HASH (data, node, hash, offloaded_flows) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            return data;
        }
    }

    return NULL;
}

static int
netdev_dummy_flow_put(struct netdev *netdev, struct match *match,
                      struct nlattr *actions OVS_UNUSED,
                      size_t actions_len OVS_UNUSED,
                      const ovs_u128 *ufid, struct offload_info *info,
                      struct dpif_flow_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    struct offloaded_flow *off_flow;
    bool modify = true;

    ovs_mutex_lock(&dev->mutex);

    off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
    if (!off_flow) {
        /* Create new offloaded flow. */
        off_flow = xzalloc(sizeof *off_flow);
        memcpy(&off_flow->ufid, ufid, sizeof *ufid);
        hmap_insert(&dev->offloaded_flows, &off_flow->node,
                    netdev_dummy_flow_hash(ufid));
        modify = false;
    }

    off_flow->mark = info->flow_mark;
    memcpy(&off_flow->match, match, sizeof *match);

    /* As we have per-netdev 'offloaded_flows', we don't need to match
     * the 'in_port' for received packets.  This will also allow offloading
     * for packets passed to the 'receive' command without specifying the
     * 'in_port'. */
    off_flow->match.wc.masks.in_port.odp_port = 0;

    ovs_mutex_unlock(&dev->mutex);

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: flow put[%s]: ", netdev_get_name(netdev),
                      modify ? "modify" : "create");
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " flow match: ");
        match_format(match, NULL, &ds, OFP_DEFAULT_PRIORITY);
        ds_put_format(&ds, ", mark: %"PRIu32, info->flow_mark);

        VLOG_DBG("%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }
    return 0;
}

static int
netdev_dummy_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
                      struct dpif_flow_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    struct offloaded_flow *off_flow;
    const char *error = NULL;
    uint32_t mark;

    ovs_mutex_lock(&dev->mutex);

    off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
    if (!off_flow) {
        error = "No such flow.";
        goto exit;
    }

    mark = off_flow->mark;
    hmap_remove(&dev->offloaded_flows, &off_flow->node);
    free(off_flow);

exit:
    ovs_mutex_unlock(&dev->mutex);

    if (error || VLOG_IS_DBG_ENABLED()) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_format(&ds, "%s: ", netdev_get_name(netdev));
        if (error) {
            ds_put_cstr(&ds, "failed to ");
        }
        ds_put_cstr(&ds, "flow del: ");
        odp_format_ufid(ufid, &ds);
        if (error) {
            ds_put_format(&ds, " error: %s", error);
        } else {
            ds_put_format(&ds, " mark: %"PRIu32, mark);
        }
        VLOG(error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
    }

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }
    return error ? -1 : 0;
}

#define NETDEV_DUMMY_CLASS_COMMON                            \
    .run = netdev_dummy_run,                                 \
    .wait = netdev_dummy_wait,                               \
    .alloc = netdev_dummy_alloc,                             \
    .construct = netdev_dummy_construct,                     \
    .destruct = netdev_dummy_destruct,                       \
    .dealloc = netdev_dummy_dealloc,                         \
    .get_config = netdev_dummy_get_config,                   \
    .set_config = netdev_dummy_set_config,                   \
    .get_numa_id = netdev_dummy_get_numa_id,                 \
    .send = netdev_dummy_send,                               \
    .set_etheraddr = netdev_dummy_set_etheraddr,             \
    .get_etheraddr = netdev_dummy_get_etheraddr,             \
    .get_mtu = netdev_dummy_get_mtu,                         \
    .set_mtu = netdev_dummy_set_mtu,                         \
    .get_ifindex = netdev_dummy_get_ifindex,                 \
    .get_stats = netdev_dummy_get_stats,                     \
    .get_custom_stats = netdev_dummy_get_custom_stats,       \
    .get_queue = netdev_dummy_get_queue,                     \
    .get_queue_stats = netdev_dummy_get_queue_stats,         \
    .queue_dump_start = netdev_dummy_queue_dump_start,       \
    .queue_dump_next = netdev_dummy_queue_dump_next,         \
    .queue_dump_done = netdev_dummy_queue_dump_done,         \
    .dump_queue_stats = netdev_dummy_dump_queue_stats,       \
    .get_addr_list = netdev_dummy_get_addr_list,             \
    .update_flags = netdev_dummy_update_flags,               \
    .rxq_alloc = netdev_dummy_rxq_alloc,                     \
    .rxq_construct = netdev_dummy_rxq_construct,             \
    .rxq_destruct = netdev_dummy_rxq_destruct,               \
    .rxq_dealloc = netdev_dummy_rxq_dealloc,                 \
    .rxq_recv = netdev_dummy_rxq_recv,                       \
    .rxq_wait = netdev_dummy_rxq_wait,                       \
    .rxq_drain = netdev_dummy_rxq_drain

static const struct netdev_class dummy_class = {
    NETDEV_DUMMY_CLASS_COMMON,
    .type = "dummy"
};

static const struct netdev_class dummy_internal_class = {
    NETDEV_DUMMY_CLASS_COMMON,
    .type = "dummy-internal"
};

static const struct netdev_class dummy_pmd_class = {
    NETDEV_DUMMY_CLASS_COMMON,
    .type = "dummy-pmd",
    .is_pmd = true,
    .reconfigure = netdev_dummy_reconfigure
};

static int
netdev_dummy_offloads_init_flow_api(struct netdev *netdev)
{
    return is_dummy_class(netdev->netdev_class) ? 0 : EOPNOTSUPP;
}

static const struct netdev_flow_api netdev_offload_dummy = {
    .type = "dummy",
    .flow_put = netdev_dummy_flow_put,
    .flow_del = netdev_dummy_flow_del,
    .init_flow_api = netdev_dummy_offloads_init_flow_api,
};

/* Helper functions. */

static void
pkt_list_delete(struct ovs_list *l)
{
    struct pkt_list_node *pkt;

    LIST_FOR_EACH_POP (pkt, list_node, l) {
        dp_packet_delete(pkt->pkt);
        free(pkt);
    }
}

static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;
    eth_from_hex(s, &packet);
    return packet;
}

static struct dp_packet *
eth_from_flow_str(const char *s, size_t packet_size,
                  struct flow *flow, char **errorp)
{
    *errorp = NULL;

    enum odp_key_fitness fitness;
    struct dp_packet *packet;
    struct ofpbuf odp_key;
    int error;

    /* Convert string to datapath key.
     *
     * It would actually be nicer to parse an OpenFlow-like flow key here, but
     * the code for that currently calls exit() on parse error.  We have to
     * settle for parsing a datapath key for now.
     */
    ofpbuf_init(&odp_key, 0);
    error = odp_flow_from_string(s, NULL, &odp_key, NULL, errorp);
    if (error) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    /* Convert odp_key to flow. */
    fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, flow, errorp);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    packet = dp_packet_new(0);
    if (packet_size) {
        flow_compose(packet, flow, NULL, 0);
        if (dp_packet_size(packet) < packet_size) {
            packet_expand(packet, flow, packet_size);
        } else if (dp_packet_size(packet) > packet_size) {
            dp_packet_delete(packet);
            packet = NULL;
        }
    } else {
        flow_compose(packet, flow, NULL, 64);
    }

    ofpbuf_uninit(&odp_key);
    return packet;
}

static void
netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
{
    struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);

    pkt_node->pkt = packet;
    ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
    rx->recv_queue_len++;
    seq_change(rx->seq);
}

static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
                          struct flow *flow, int queue_id)
    OVS_REQUIRES(dummy->mutex)
{
    struct netdev_rxq_dummy *rx, *prev;
    struct offloaded_flow *data;
    struct flow packet_flow;

    if (dummy->rxq_pcap) {
        ovs_pcap_write(dummy->rxq_pcap, packet);
    }

    if (!flow) {
        flow = &packet_flow;
        flow_extract(packet, flow);
    }
    HMAP_FOR_EACH (data, node, &dummy->offloaded_flows) {
        if (flow_equal_except(flow, &data->match.flow, &data->match.wc)) {

            dp_packet_set_flow_mark(packet, data->mark);

            if (VLOG_IS_DBG_ENABLED()) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ds_put_format(&ds, "%s: packet: ",
                              netdev_get_name(&dummy->up));
                /* 'flow' does not contain the proper port number here.
                 * Let's just clear it, as it is wildcarded anyway. */
                flow->in_port.ofp_port = 0;
                flow_format(&ds, flow, NULL);

                ds_put_cstr(&ds, " matches with flow: ");
                odp_format_ufid(&data->ufid, &ds);
                ds_put_cstr(&ds, " ");
                match_format(&data->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
                ds_put_format(&ds, " with mark: %"PRIu32, data->mark);

                VLOG_DBG("%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
            break;
        }
    }

    prev = NULL;
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->up.queue_id == queue_id &&
            rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        dp_packet_delete(packet);
    }
}

static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i, k = 1, rx_qid = 0;

    netdev = netdev_from_name(argv[k++]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit_netdev;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dummy_dev->mutex);

    if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
        rx_qid = strtol(argv[k + 1], NULL, 10);
        if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
            unixctl_command_reply_error(conn, "bad rx queue id.");
            goto exit;
        }
        k += 2;
    }

    for (i = k; i < argc; i++) {
        struct dp_packet *packet;
        struct flow flow;

        /* Try to parse 'argv[i]' as a packet in hex. */
        packet = eth_from_packet(argv[i]);

        if (!packet) {
            int packet_size = 0;
            const char *flow_str = argv[i];

            /* Parse the optional --len argument that immediately follows
             * a 'flow'. */
            if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
                packet_size = strtol(argv[i + 2], NULL, 10);

                if (packet_size < ETH_TOTAL_MIN) {
                    unixctl_command_reply_error(conn, "too small packet len");
                    goto exit;
                }
                i += 2;
            }
            /* Try to parse 'argv[i]' as an ODP flow. */
            char *error_s;
            packet = eth_from_flow_str(flow_str, packet_size, &flow, &error_s);
            if (!packet) {
                unixctl_command_reply_error(conn, error_s);
                free(error_s);
                goto exit;
            }
        } else {
            flow_extract(packet, &flow);
        }

        netdev_dummy_queue_packet(dummy_dev, packet, &flow, rx_qid);
    }

    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dummy_dev->mutex);
exit_netdev:
    netdev_close(netdev);
}

static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
                             const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            netdev_dummy_set_admin_state__(dummy_dev, up);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Unknown Dummy Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dummy_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

static void
display_conn_state__(struct ds *s, const char *name,
                     enum dummy_netdev_conn_state state)
{
    ds_put_format(s, "%s: ", name);

    switch (state) {
    case CONN_STATE_CONNECTED:
        ds_put_cstr(s, "connected\n");
        break;

    case CONN_STATE_NOT_CONNECTED:
        ds_put_cstr(s, "disconnected\n");
        break;

    case CONN_STATE_UNKNOWN:
    default:
        ds_put_cstr(s, "unknown\n");
        break;
    };
}

static void
netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
                        const char *argv[], void *aux OVS_UNUSED)
{
    enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
    struct ds s;

    ds_init(&s);

    if (argc > 1) {
        const char *dev_name = argv[1];
        struct netdev *netdev = netdev_from_name(dev_name);

        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            state = dummy_netdev_get_conn_state(&dummy_dev->conn);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        }
        display_conn_state__(&s, dev_name, state);
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            state = dummy_netdev_get_conn_state(&netdev->conn);
            ovs_mutex_unlock(&netdev->mutex);
            if (state != CONN_STATE_UNKNOWN) {
                display_conn_state__(&s, netdev->up.name, state);
            }
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }

    unixctl_command_reply(conn, ds_cstr(&s));
    ds_destroy(&s);
}

static void
netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in_addr ip, mask;
        char *error;

        error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
        if (!error) {
            netdev_dummy_set_in4(netdev, ip, mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}

static void
netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in6_addr ip6;
        char *error;
        uint32_t plen;

        error = ipv6_parse_cidr(argv[2], &ip6, &plen);
        if (!error) {
            struct in6_addr mask;

            mask = ipv6_create_mask(plen);
            netdev_dummy_set_in6(netdev, &ip6, &mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}

static void
netdev_dummy_override(const char *type)
{
    if (!netdev_unregister_provider(type)) {
        struct netdev_class *class;
        int error;

        class = xmemdup(&dummy_class, sizeof dummy_class);
        class->type = xstrdup(type);
        error = netdev_register_provider(class);
        if (error) {
            VLOG_ERR("%s: failed to register netdev provider (%s)",
                     type, ovs_strerror(error));
            free(CONST_CAST(char *, class->type));
            free(class);
        }
    }
}

void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive",
                             "name [--qid queue_id] packet|flow [--len packet_len]",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);
    unixctl_command_register("netdev-dummy/ip6addr",
                             "[netdev] ip6addr", 2, 2,
                             netdev_dummy_ip6addr, NULL);

    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);
    netdev_register_provider(&dummy_internal_class);
    netdev_register_provider(&dummy_pmd_class);

    netdev_register_flow_api_provider(&netdev_offload_dummy);

    netdev_vport_tunnel_register();
}
614c4892 2046}