lib/netdev-dummy.c
/*
 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "dummy.h"

#include <errno.h>

#include "flow.h"
#include "list.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofpbuf.h"
#include "packets.h"
#include "poll-loop.h"
#include "shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(netdev_dummy);

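/* A stream connection accepted on a dummy netdev's configured "pstream".
 * Frames travel over the stream with a simple framing: a 2-byte network byte
 * order length followed by that many bytes of Ethernet frame (see
 * netdev_dummy_run() and netdev_dummy_send()). */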
struct dummy_stream {
    struct stream *stream;
    struct ofpbuf rxbuf;
    struct list txq;
};

/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dummy's. */
static struct list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = LIST_INITIALIZER(&dummy_list);

struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    unsigned int change_seq OVS_GUARDED;
    int ifindex OVS_GUARDED;

    struct pstream *pstream OVS_GUARDED;
    struct dummy_stream *streams OVS_GUARDED;
    size_t n_streams OVS_GUARDED;

    struct list rxes OVS_GUARDED;   /* List of child "netdev_rx_dummy"s. */
};

/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

struct netdev_rx_dummy {
    struct netdev_rx up;
    struct list node;               /* In netdev_dummy's "rxes" list. */
    struct list recv_queue;
    int recv_queue_len;             /* list_size(&recv_queue). */
    bool listening;
};

static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_poll_notify(struct netdev_dummy *netdev)
    OVS_REQUIRES(netdev->mutex);
static void netdev_dummy_queue_packet(struct netdev_dummy *, struct ofpbuf *);

static void dummy_stream_close(struct dummy_stream *);

static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}

static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}

static struct netdev_rx_dummy *
netdev_rx_dummy_cast(const struct netdev_rx *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rx_dummy, up);
}

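/* Performs periodic work for all dummy netdevs: accepts new connections on
 * each device's pstream, pushes queued tx data out to connected streams, and
 * reassembles length-prefixed frames arriving from streams into the devices'
 * receive queues. */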
static void
netdev_dummy_run(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        size_t i;

        ovs_mutex_lock(&dev->mutex);

        if (dev->pstream) {
            struct stream *new_stream;
            int error;

            error = pstream_accept(dev->pstream, &new_stream);
            if (!error) {
                struct dummy_stream *s;

                dev->streams = xrealloc(dev->streams,
                                        ((dev->n_streams + 1)
                                         * sizeof *dev->streams));
                s = &dev->streams[dev->n_streams++];
                s->stream = new_stream;
                ofpbuf_init(&s->rxbuf, 2048);
                list_init(&s->txq);
            } else if (error != EAGAIN) {
                VLOG_WARN("%s: accept failed (%s)",
                          pstream_get_name(dev->pstream), ovs_strerror(error));
                pstream_close(dev->pstream);
                dev->pstream = NULL;
            }
        }

        for (i = 0; i < dev->n_streams; i++) {
            struct dummy_stream *s = &dev->streams[i];
            int error = 0;
            size_t n;

            stream_run(s->stream);

            if (!list_is_empty(&s->txq)) {
                struct ofpbuf *txbuf;
                int retval;

                txbuf = ofpbuf_from_list(list_front(&s->txq));
                retval = stream_send(s->stream, txbuf->data, txbuf->size);
                if (retval > 0) {
                    ofpbuf_pull(txbuf, retval);
                    if (!txbuf->size) {
                        list_remove(&txbuf->list_node);
                        ofpbuf_delete(txbuf);
                    }
                } else if (retval != -EAGAIN) {
                    error = -retval;
                }
            }

            if (!error) {
                if (s->rxbuf.size < 2) {
                    n = 2 - s->rxbuf.size;
                } else {
                    uint16_t frame_len;

                    frame_len = ntohs(get_unaligned_be16(s->rxbuf.data));
                    if (frame_len < ETH_HEADER_LEN) {
                        error = EPROTO;
                        n = 0;
                    } else {
                        n = (2 + frame_len) - s->rxbuf.size;
                    }
                }
            }
            if (!error) {
                int retval;

                ofpbuf_prealloc_tailroom(&s->rxbuf, n);
                retval = stream_recv(s->stream, ofpbuf_tail(&s->rxbuf), n);
                if (retval > 0) {
                    s->rxbuf.size += retval;
                    if (retval == n && s->rxbuf.size > 2) {
                        ofpbuf_pull(&s->rxbuf, 2);
                        netdev_dummy_queue_packet(dev,
                                                  ofpbuf_clone(&s->rxbuf));
                        ofpbuf_clear(&s->rxbuf);
                    }
                } else if (retval != -EAGAIN) {
                    error = (retval < 0 ? -retval
                             : s->rxbuf.size ? EPROTO
                             : EOF);
                }
            }

            if (error) {
                VLOG_DBG("%s: closing connection (%s)",
                         stream_get_name(s->stream),
                         ovs_retval_to_string(error));
                dummy_stream_close(&dev->streams[i]);
                dev->streams[i] = dev->streams[--dev->n_streams];
            }
        }

        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static void
dummy_stream_close(struct dummy_stream *s)
{
    stream_close(s->stream);
    ofpbuf_uninit(&s->rxbuf);
    ofpbuf_list_delete(&s->txq);
}

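/* Arranges for the poll loop to wake up when netdev_dummy_run() has work to
 * do, i.e. when any dummy netdev's pstream or streams become ready. */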
static void
netdev_dummy_wait(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        size_t i;

        ovs_mutex_lock(&dev->mutex);
        if (dev->pstream) {
            pstream_wait(dev->pstream);
        }
        for (i = 0; i < dev->n_streams; i++) {
            struct dummy_stream *s = &dev->streams[i];

            stream_run_wait(s->stream);
            if (!list_is_empty(&s->txq)) {
                stream_send_wait(s->stream);
            }
            stream_recv_wait(s->stream);
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}

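/* Initializes a newly allocated dummy netdev.  Each device is given an
 * aa:55:xx:xx:xx:xx Ethernet address derived from an atomically incremented
 * counter, so devices created concurrently still get distinct addresses. */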
static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    atomic_add(&next_n, 1, &n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr[0] = 0xaa;
    netdev->hwaddr[1] = 0x55;
    netdev->hwaddr[2] = n >> 24;
    netdev->hwaddr[3] = n >> 16;
    netdev->hwaddr[4] = n >> 8;
    netdev->hwaddr[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->change_seq = 1;
    netdev->ifindex = -EOPNOTSUPP;

    netdev->pstream = NULL;
    netdev->streams = NULL;
    netdev->n_streams = 0;

    list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}

static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    size_t i;

    ovs_mutex_lock(&dummy_list_mutex);
    list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    pstream_close(netdev->pstream);
    for (i = 0; i < netdev->n_streams; i++) {
        dummy_stream_close(&netdev->streams[i]);
    }
    free(netdev->streams);
    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}

static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}

static int
netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }
    if (netdev->pstream) {
        smap_add(args, "pstream", pstream_get_name(netdev->pstream));
    }
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

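/* Applies the "ifindex" and "pstream" settings from 'args'.  Changing the
 * pstream target closes any existing passive stream and opens the new one;
 * a failure to open it is logged but does not make reconfiguration fail. */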
static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pstream;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    pstream = smap_get(args, "pstream");
    if (!pstream
        || !netdev->pstream
        || strcmp(pstream_get_name(netdev->pstream), pstream)) {
        pstream_close(netdev->pstream);
        netdev->pstream = NULL;

        if (pstream) {
            int error;

            error = pstream_open(pstream, &netdev->pstream, DSCP_DEFAULT);
            if (error) {
                VLOG_WARN("%s: open failed (%s)",
                          pstream, ovs_strerror(error));
            }
        }
    }
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static struct netdev_rx *
netdev_dummy_rx_alloc(void)
{
    struct netdev_rx_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}

static int
netdev_dummy_rx_construct(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    list_push_back(&netdev->rxes, &rx->node);
    list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dummy_rx_destruct(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    list_remove(&rx->node);
    ofpbuf_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
}

static void
netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);

    free(rx);
}

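/* Pops one packet from 'rx_''s receive queue into 'buffer'.  Returns the
 * packet length on success, -EAGAIN if the queue is empty, or -EMSGSIZE if
 * the packet does not fit in 'size' bytes. */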
static int
netdev_dummy_rx_recv(struct netdev_rx *rx_, void *buffer, size_t size)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct ofpbuf *packet;
    int retval;

    ovs_mutex_lock(&netdev->mutex);
    if (!list_is_empty(&rx->recv_queue)) {
        packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        return -EAGAIN;
    }

    if (packet->size <= size) {
        memcpy(buffer, packet->data, packet->size);
        retval = packet->size;
    } else {
        retval = -EMSGSIZE;
    }
    ofpbuf_delete(packet);

    return retval;
}

static void
netdev_dummy_rx_wait(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    if (!list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&netdev->mutex);
}

static int
netdev_dummy_rx_drain(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ofpbuf_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

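/* "Transmits" 'buffer': nothing receives the frame directly, but tx
 * statistics are updated and a length-prefixed copy is queued on every
 * connected stream whose tx queue still has room.  Returns EMSGSIZE if
 * 'size' is shorter than an Ethernet header or larger than the device's MTU
 * allows. */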
static int
netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    size_t i;

    if (size < ETH_HEADER_LEN) {
        return EMSGSIZE;
    } else {
        const struct eth_header *eth = buffer;
        int max_size;

        ovs_mutex_lock(&dev->mutex);
        max_size = dev->mtu + ETH_HEADER_LEN;
        ovs_mutex_unlock(&dev->mutex);

        if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
            max_size += VLAN_HEADER_LEN;
        }
        if (size > max_size) {
            return EMSGSIZE;
        }
    }

    ovs_mutex_lock(&dev->mutex);
    dev->stats.tx_packets++;
    dev->stats.tx_bytes += size;

    for (i = 0; i < dev->n_streams; i++) {
        struct dummy_stream *s = &dev->streams[i];

        if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
            struct ofpbuf *b;

            b = ofpbuf_clone_data_with_headroom(buffer, size, 2);
            put_unaligned_be16(ofpbuf_push_uninit(b, 2), htons(size));
            list_push_back(&s->txq, &b->list_node);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_set_etheraddr(struct netdev *netdev,
                           const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_dummy_poll_notify(dev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_etheraddr(const struct netdev *netdev,
                           uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->mtu = mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *stats = dev->stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->ifindex;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dummy_update_flags__(struct netdev_dummy *netdev,
                            enum netdev_flags off, enum netdev_flags on,
                            enum netdev_flags *old_flagsp)
    OVS_REQUIRES(netdev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = netdev->flags;
    netdev->flags |= on;
    netdev->flags &= ~off;
    if (*old_flagsp != netdev->flags) {
        netdev_dummy_poll_notify(netdev);
    }

    return 0;
}

static int
netdev_dummy_update_flags(struct netdev *netdev_,
                          enum netdev_flags off, enum netdev_flags on,
                          enum netdev_flags *old_flagsp)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static unsigned int
netdev_dummy_change_seq(const struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int change_seq;

    ovs_mutex_lock(&netdev->mutex);
    change_seq = netdev->change_seq;
    ovs_mutex_unlock(&netdev->mutex);

    return change_seq;
}

/* Helper functions. */

static void
netdev_dummy_poll_notify(struct netdev_dummy *dev)
{
    dev->change_seq++;
    if (!dev->change_seq) {
        dev->change_seq++;
    }
}

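/* The netdev provider class for "dummy" devices.  Callbacks that the dummy
 * device does not implement are left as null pointers. */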
static const struct netdev_class dummy_class = {
    "dummy",
    NULL,                       /* init */
    netdev_dummy_run,
    netdev_dummy_wait,

    netdev_dummy_alloc,
    netdev_dummy_construct,
    netdev_dummy_destruct,
    netdev_dummy_dealloc,
    netdev_dummy_get_config,
    netdev_dummy_set_config,
    NULL,                       /* get_tunnel_config */

    netdev_dummy_send,          /* send */
    NULL,                       /* send_wait */

    netdev_dummy_set_etheraddr,
    netdev_dummy_get_etheraddr,
    netdev_dummy_get_mtu,
    netdev_dummy_set_mtu,
    netdev_dummy_get_ifindex,
    NULL,                       /* get_carrier */
    NULL,                       /* get_carrier_resets */
    NULL,                       /* get_miimon */
    netdev_dummy_get_stats,
    netdev_dummy_set_stats,

    NULL,                       /* get_features */
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    NULL,                       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    NULL,                       /* get_status */
    NULL,                       /* arp_lookup */

    netdev_dummy_update_flags,

    netdev_dummy_change_seq,

    netdev_dummy_rx_alloc,
    netdev_dummy_rx_construct,
    netdev_dummy_rx_destruct,
    netdev_dummy_rx_dealloc,
    netdev_dummy_rx_recv,
    netdev_dummy_rx_wait,
    netdev_dummy_rx_drain,
};

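/* Parses 's' either as a sequence of hex digits giving a raw Ethernet frame
 * or, failing that, as an ODP flow key from which a packet is composed.
 * Returns the new packet on success, a null pointer on failure. */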
static struct ofpbuf *
eth_from_packet_or_flow(const char *s)
{
    enum odp_key_fitness fitness;
    struct ofpbuf *packet;
    struct ofpbuf odp_key;
    struct flow flow;
    int error;

    if (!eth_from_hex(s, &packet)) {
        return packet;
    }

    /* Convert string to datapath key.
     *
     * It would actually be nicer to parse an OpenFlow-like flow key here, but
     * the code for that currently calls exit() on parse error.  We have to
     * settle for parsing a datapath key for now.
     */
    ofpbuf_init(&odp_key, 0);
    error = odp_flow_from_string(s, NULL, &odp_key, NULL);
    if (error) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    /* Convert odp_key to flow. */
    fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    packet = ofpbuf_new(0);
    flow_compose(packet, &flow);

    ofpbuf_uninit(&odp_key);
    return packet;
}

static void
netdev_dummy_queue_packet__(struct netdev_rx_dummy *rx, struct ofpbuf *packet)
{
    list_push_back(&rx->recv_queue, &packet->list_node);
    rx->recv_queue_len++;
}

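/* Adds 'packet' to the receive queue of each of 'dummy''s receivers that
 * still has room, cloning it so that every receiver gets its own copy.  If
 * no receiver has room, 'packet' is simply freed. */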
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct ofpbuf *packet)
{
    struct netdev_rx_dummy *rx, *prev;

    prev = NULL;
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                netdev_dummy_queue_packet__(prev, ofpbuf_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        ofpbuf_delete(packet);
    }
}

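/* Handler for the "netdev-dummy/receive" unixctl command: injects each
 * PACKET|FLOW argument as a received frame on the named dummy netdev. */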
static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i;

    netdev = netdev_from_name(argv[1]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    for (i = 2; i < argc; i++) {
        struct ofpbuf *packet;

        packet = eth_from_packet_or_flow(argv[i]);
        if (!packet) {
            unixctl_command_reply_error(conn, "bad packet syntax");
            goto exit;
        }

        ovs_mutex_lock(&dummy_dev->mutex);
        dummy_dev->stats.rx_packets++;
        dummy_dev->stats.rx_bytes += packet->size;
        netdev_dummy_queue_packet(dummy_dev, packet);
        ovs_mutex_unlock(&dummy_dev->mutex);
    }

    unixctl_command_reply(conn, NULL);

exit:
    netdev_close(netdev);
}

static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

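/* Handler for the "netdev-dummy/set-admin-state" unixctl command: brings
 * the named dummy netdev, or every dummy netdev if none is named, up or
 * down by setting or clearing NETDEV_UP. */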
static void
netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
                             const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            netdev_dummy_set_admin_state__(dummy_dev, up);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Unknown Dummy Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dummy_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

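/* Registers the "dummy" netdev provider and its unixctl commands.  If
 * 'override' is true, every netdev type that can be unregistered is replaced
 * with a copy of the dummy class under the same name.  The commands can then
 * be driven, for example, with ovs-appctl (the arguments below are
 * illustrative):
 *
 *     ovs-appctl netdev-dummy/receive br0 <hex-frame-or-flow>
 *     ovs-appctl netdev-dummy/set-admin-state br0 down
 */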
void
netdev_dummy_register(bool override)
{
    unixctl_command_register("netdev-dummy/receive", "NAME PACKET|FLOW...",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);

    if (override) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!netdev_unregister_provider(type)) {
                struct netdev_class *class;
                int error;

                class = xmemdup(&dummy_class, sizeof dummy_class);
                class->type = xstrdup(type);
                error = netdev_register_provider(class);
                if (error) {
                    VLOG_ERR("%s: failed to register netdev provider (%s)",
                             type, ovs_strerror(error));
                    free(CONST_CAST(char *, class->type));
                    free(class);
                }
            }
        }
        sset_destroy(&types);
    }
    netdev_register_provider(&dummy_class);

    netdev_vport_tunnel_register();
}