[mirror_ovs.git] / lib / netdev-vport.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "netdev-vport.h"
20
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <sys/socket.h>
24 #include <net/if.h>
25 #include <sys/ioctl.h>
26
27 #include "byte-order.h"
28 #include "daemon.h"
29 #include "dirs.h"
30 #include "dpif.h"
31 #include "hash.h"
32 #include "hmap.h"
33 #include "list.h"
34 #include "netdev-provider.h"
35 #include "ofpbuf.h"
36 #include "packets.h"
37 #include "poll-loop.h"
38 #include "route-table.h"
39 #include "shash.h"
40 #include "socket-util.h"
41 #include "vlog.h"
42
43 VLOG_DEFINE_THIS_MODULE(netdev_vport);
44
45 #define GENEVE_DST_PORT 6081
46 #define VXLAN_DST_PORT 4789
47 #define LISP_DST_PORT 4341
48
49 #define DEFAULT_TTL 64
50
51 struct netdev_vport {
52 struct netdev up;
53
54 /* Protects all members below. */
55 struct ovs_mutex mutex;
56
57 uint8_t etheraddr[ETH_ADDR_LEN];
58 struct netdev_stats stats;
59
60 /* Tunnels. */
61 struct netdev_tunnel_config tnl_cfg;
62 char egress_iface[IFNAMSIZ];
63 bool carrier_status;
64
65 /* Patch Ports. */
66 char *peer;
67 };
68
69 struct vport_class {
70 const char *dpif_port;
71 struct netdev_class netdev_class;
72 };
73
74 /* Last read of the route-table's change number. */
75 static uint64_t rt_change_seqno;
76
77 static int netdev_vport_construct(struct netdev *);
78 static int get_patch_config(const struct netdev *netdev, struct smap *args);
79 static int get_tunnel_config(const struct netdev *, struct smap *args);
80 static bool tunnel_check_status_change__(struct netdev_vport *);
81
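/* A netdev class defined in this file (i.e. a vport class) always uses
 * netdev_vport_construct() as its 'construct' callback, so that shared
 * function pointer is what identifies vport classes below. */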
82 static bool
83 is_vport_class(const struct netdev_class *class)
84 {
85 return class->construct == netdev_vport_construct;
86 }
87
88 bool
89 netdev_vport_is_vport_class(const struct netdev_class *class)
90 {
91 return is_vport_class(class);
92 }
93
94 static const struct vport_class *
95 vport_class_cast(const struct netdev_class *class)
96 {
97 ovs_assert(is_vport_class(class));
98 return CONTAINER_OF(class, struct vport_class, netdev_class);
99 }
100
101 static struct netdev_vport *
102 netdev_vport_cast(const struct netdev *netdev)
103 {
104 ovs_assert(is_vport_class(netdev_get_class(netdev)));
105 return CONTAINER_OF(netdev, struct netdev_vport, up);
106 }
107
108 static const struct netdev_tunnel_config *
109 get_netdev_tunnel_config(const struct netdev *netdev)
110 {
111 return &netdev_vport_cast(netdev)->tnl_cfg;
112 }
113
114 bool
115 netdev_vport_is_patch(const struct netdev *netdev)
116 {
117 const struct netdev_class *class = netdev_get_class(netdev);
118
119 return class->get_config == get_patch_config;
120 }
121
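/* Returns true if 'dev' is a layer-3 vport, that is, one that carries
 * packets without an Ethernet header; currently only "lisp" qualifies. */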
122 bool
123 netdev_vport_is_layer3(const struct netdev *dev)
124 {
125 const char *type = netdev_get_type(dev);
126
127 return (!strcmp("lisp", type));
128 }
129
130 static bool
131 netdev_vport_needs_dst_port(const struct netdev *dev)
132 {
133 const struct netdev_class *class = netdev_get_class(dev);
134 const char *type = netdev_get_type(dev);
135
136 return (class->get_config == get_tunnel_config &&
137 (!strcmp("geneve", type) || !strcmp("vxlan", type) ||
138 !strcmp("lisp", type)));
139 }
140
141 const char *
142 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
143 {
144 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
145 }
146
147 const char *
148 netdev_vport_get_dpif_port(const struct netdev *netdev,
149 char namebuf[], size_t bufsize)
150 {
151 const struct netdev_class *class = netdev_get_class(netdev);
152 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
153
154 if (!dpif_port) {
155 return netdev_get_name(netdev);
156 }
157
158 if (netdev_vport_needs_dst_port(netdev)) {
159 const struct netdev_vport *vport = netdev_vport_cast(netdev);
160
161 /*
162 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
163 * a dpif port name that is short enough to fit, including any
164 * port number suffix, but assert just in case.
165 */
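/* For example, a vxlan vport that keeps the default destination port is
 * named "vxlan_sys_4789" in the datapath. */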
166 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
167 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
168 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
169 ntohs(vport->tnl_cfg.dst_port));
170 return namebuf;
171 } else {
172 return dpif_port;
173 }
174 }
175
176 char *
177 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
178 {
179 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
180
181 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
182 sizeof namebuf));
183 }
184
185 /* Whenever the route-table change number is incremented,
186 * netdev_vport_route_changed() should be called to update
187 * the corresponding tunnel interface status. */
188 static void
189 netdev_vport_route_changed(void)
190 {
191 struct netdev **vports;
192 size_t i, n_vports;
193
194 vports = netdev_get_vports(&n_vports);
195 for (i = 0; i < n_vports; i++) {
196 struct netdev *netdev_ = vports[i];
197 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
198
199 ovs_mutex_lock(&netdev->mutex);
200 /* Only tunnel vports (those with a remote IP) need a status recheck. */
201 if (netdev->tnl_cfg.ip_dst) {
202 if (tunnel_check_status_change__(netdev)) {
203 netdev_change_seq_changed(netdev_);
204 }
205 }
206 ovs_mutex_unlock(&netdev->mutex);
207
208 netdev_close(netdev_);
209 }
210
211 free(vports);
212 }
213
214 static struct netdev *
215 netdev_vport_alloc(void)
216 {
217 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
218 return &netdev->up;
219 }
220
221 static int
222 netdev_vport_construct(struct netdev *netdev_)
223 {
224 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
225
226 ovs_mutex_init(&netdev->mutex);
227 eth_addr_random(netdev->etheraddr);
228
229 route_table_register();
230
231 return 0;
232 }
233
234 static void
235 netdev_vport_destruct(struct netdev *netdev_)
236 {
237 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
238
239 route_table_unregister();
240 free(netdev->peer);
241 ovs_mutex_destroy(&netdev->mutex);
242 }
243
244 static void
245 netdev_vport_dealloc(struct netdev *netdev_)
246 {
247 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
248 free(netdev);
249 }
250
251 static int
252 netdev_vport_set_etheraddr(struct netdev *netdev_,
253 const uint8_t mac[ETH_ADDR_LEN])
254 {
255 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
256
257 ovs_mutex_lock(&netdev->mutex);
258 memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
259 ovs_mutex_unlock(&netdev->mutex);
260 netdev_change_seq_changed(netdev_);
261
262 return 0;
263 }
264
265 static int
266 netdev_vport_get_etheraddr(const struct netdev *netdev_,
267 uint8_t mac[ETH_ADDR_LEN])
268 {
269 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
270
271 ovs_mutex_lock(&netdev->mutex);
272 memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
273 ovs_mutex_unlock(&netdev->mutex);
274
275 return 0;
276 }
277
278 /* Checks whether the tunnel's egress interface or its carrier status has
279 * changed, updating the cached values and returning true if so. */
280 static bool
281 tunnel_check_status_change__(struct netdev_vport *netdev)
282 OVS_REQUIRES(netdev->mutex)
283 {
284 char iface[IFNAMSIZ];
285 bool status = false;
286 ovs_be32 route;
287
288 iface[0] = '\0';
289 route = netdev->tnl_cfg.ip_dst;
290 if (route_table_get_name(route, iface)) {
291 struct netdev *egress_netdev;
292
293 if (!netdev_open(iface, "system", &egress_netdev)) {
294 status = netdev_get_carrier(egress_netdev);
295 netdev_close(egress_netdev);
296 }
297 }
298
299 if (strcmp(netdev->egress_iface, iface)
300 || netdev->carrier_status != status) {
301 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
302 netdev->carrier_status = status;
303
304 return true;
305 }
306
307 return false;
308 }
309
310 static int
311 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
312 {
313 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
314
315 if (netdev->egress_iface[0]) {
316 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
317
318 smap_add(smap, "tunnel_egress_iface_carrier",
319 netdev->carrier_status ? "up" : "down");
320 }
321
322 return 0;
323 }
324
325 static int
326 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
327 enum netdev_flags off,
328 enum netdev_flags on OVS_UNUSED,
329 enum netdev_flags *old_flagsp)
330 {
331 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
332 return EOPNOTSUPP;
333 }
334
335 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
336 return 0;
337 }
338
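/* Class-level 'run' callback: polls the route table and, when its change
 * sequence number advances, rechecks the status of every tunnel vport.
 * netdev_vport_wait() below arranges for a wakeup when that happens. */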
339 static void
340 netdev_vport_run(void)
341 {
342 uint64_t seq;
343
344 route_table_run();
345 seq = route_table_get_change_seq();
346 if (rt_change_seqno != seq) {
347 rt_change_seqno = seq;
348 netdev_vport_route_changed();
349 }
350 }
351
352 static void
353 netdev_vport_wait(void)
354 {
355 uint64_t seq;
356
357 route_table_wait();
358 seq = route_table_get_change_seq();
359 if (rt_change_seqno != seq) {
360 poll_immediate_wake();
361 }
362 }
363 \f
364 /* Code specific to tunnel types. */
365
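/* Parses the tunnel key option 'name' (e.g. "in_key") from 'args', falling
 * back to the generic "key" option.  Sets '*present' if a key was configured
 * and '*flow' if it was the literal "flow"; otherwise returns the key value
 * in network byte order. */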
366 static ovs_be64
367 parse_key(const struct smap *args, const char *name,
368 bool *present, bool *flow)
369 {
370 const char *s;
371
372 *present = false;
373 *flow = false;
374
375 s = smap_get(args, name);
376 if (!s) {
377 s = smap_get(args, "key");
378 if (!s) {
379 return 0;
380 }
381 }
382
383 *present = true;
384
385 if (!strcmp(s, "flow")) {
386 *flow = true;
387 return 0;
388 } else {
389 return htonll(strtoull(s, NULL, 0));
390 }
391 }
392
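/* Parses the tunnel options in 'args' (remote_ip, local_ip, tos, ttl,
 * dst_port, csum, df_default, the key options and, for IPsec tunnel types,
 * the IPsec credentials) into a netdev_tunnel_config and installs it on
 * 'dev_'.  Returns 0 on success, EINVAL on an invalid configuration. */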
393 static int
394 set_tunnel_config(struct netdev *dev_, const struct smap *args)
395 {
396 struct netdev_vport *dev = netdev_vport_cast(dev_);
397 const char *name = netdev_get_name(dev_);
398 const char *type = netdev_get_type(dev_);
399 bool ipsec_mech_set, needs_dst_port, has_csum;
400 struct netdev_tunnel_config tnl_cfg;
401 struct smap_node *node;
402
403 has_csum = strstr(type, "gre");
404 ipsec_mech_set = false;
405 memset(&tnl_cfg, 0, sizeof tnl_cfg);
406
407 needs_dst_port = netdev_vport_needs_dst_port(dev_);
408 tnl_cfg.ipsec = strstr(type, "ipsec");
409 tnl_cfg.dont_fragment = true;
410
411 SMAP_FOR_EACH (node, args) {
412 if (!strcmp(node->key, "remote_ip")) {
413 struct in_addr in_addr;
414 if (!strcmp(node->value, "flow")) {
415 tnl_cfg.ip_dst_flow = true;
416 tnl_cfg.ip_dst = htonl(0);
417 } else if (lookup_ip(node->value, &in_addr)) {
418 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
419 } else if (ip_is_multicast(in_addr.s_addr)) {
420 VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
421 name, IP_ARGS(in_addr.s_addr));
422 return EINVAL;
423 } else {
424 tnl_cfg.ip_dst = in_addr.s_addr;
425 }
426 } else if (!strcmp(node->key, "local_ip")) {
427 struct in_addr in_addr;
428 if (!strcmp(node->value, "flow")) {
429 tnl_cfg.ip_src_flow = true;
430 tnl_cfg.ip_src = htonl(0);
431 } else if (lookup_ip(node->value, &in_addr)) {
432 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
433 } else {
434 tnl_cfg.ip_src = in_addr.s_addr;
435 }
436 } else if (!strcmp(node->key, "tos")) {
437 if (!strcmp(node->value, "inherit")) {
438 tnl_cfg.tos_inherit = true;
439 } else {
440 char *endptr;
441 int tos;
442 tos = strtol(node->value, &endptr, 0);
443 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
444 tnl_cfg.tos = tos;
445 } else {
446 VLOG_WARN("%s: invalid TOS %s", name, node->value);
447 }
448 }
449 } else if (!strcmp(node->key, "ttl")) {
450 if (!strcmp(node->value, "inherit")) {
451 tnl_cfg.ttl_inherit = true;
452 } else {
453 tnl_cfg.ttl = atoi(node->value);
454 }
455 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
456 tnl_cfg.dst_port = htons(atoi(node->value));
457 } else if (!strcmp(node->key, "csum") && has_csum) {
458 if (!strcmp(node->value, "true")) {
459 tnl_cfg.csum = true;
460 }
461 } else if (!strcmp(node->key, "df_default")) {
462 if (!strcmp(node->value, "false")) {
463 tnl_cfg.dont_fragment = false;
464 }
465 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
466 if (smap_get(args, "certificate")) {
467 ipsec_mech_set = true;
468 } else {
469 const char *use_ssl_cert;
470
471 /* If "use_ssl_cert" is true, then "certificate" and
472 * "private_key" will be pulled from the SSL table. The
473 * use of this option is strongly discouraged, since it
474 * will likely be removed when multiple SSL configurations
475 * are supported by OVS.
476 */
477 use_ssl_cert = smap_get(args, "use_ssl_cert");
478 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
479 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
480 name);
481 return EINVAL;
482 }
483 ipsec_mech_set = true;
484 }
485 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
486 ipsec_mech_set = true;
487 } else if (tnl_cfg.ipsec
488 && (!strcmp(node->key, "certificate")
489 || !strcmp(node->key, "private_key")
490 || !strcmp(node->key, "use_ssl_cert"))) {
491 /* Ignore options not used by the netdev. */
492 } else if (!strcmp(node->key, "key") ||
493 !strcmp(node->key, "in_key") ||
494 !strcmp(node->key, "out_key")) {
495 /* Handled separately below. */
496 } else {
497 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
498 }
499 }
500
501 /* Add a default destination port for tunnel ports if none specified. */
502 if (!strcmp(type, "geneve") && !tnl_cfg.dst_port) {
503 tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
504 }
505
506 if (!strcmp(type, "vxlan") && !tnl_cfg.dst_port) {
507 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
508 }
509
510 if (!strcmp(type, "lisp") && !tnl_cfg.dst_port) {
511 tnl_cfg.dst_port = htons(LISP_DST_PORT);
512 }
513
514 if (tnl_cfg.ipsec) {
515 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
516 static pid_t pid = 0;
517
518 #ifndef _WIN32
519 ovs_mutex_lock(&mutex);
520 if (pid <= 0) {
521 char *file_name = xasprintf("%s/%s", ovs_rundir(),
522 "ovs-monitor-ipsec.pid");
523 pid = read_pidfile(file_name);
524 free(file_name);
525 }
526 ovs_mutex_unlock(&mutex);
527 #endif
528
529 if (pid < 0) {
530 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
531 name);
532 return EINVAL;
533 }
534
535 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
536 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
537 return EINVAL;
538 }
539
540 if (!ipsec_mech_set) {
541 VLOG_ERR("%s: IPsec requires a 'peer_cert' or 'psk' argument",
542 name);
543 return EINVAL;
544 }
545 }
546
547 if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
548 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
549 name, type);
550 return EINVAL;
551 }
552 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
553 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
554 name, type);
555 return EINVAL;
556 }
557 if (!tnl_cfg.ttl) {
558 tnl_cfg.ttl = DEFAULT_TTL;
559 }
560
561 tnl_cfg.in_key = parse_key(args, "in_key",
562 &tnl_cfg.in_key_present,
563 &tnl_cfg.in_key_flow);
564
565 tnl_cfg.out_key = parse_key(args, "out_key",
566 &tnl_cfg.out_key_present,
567 &tnl_cfg.out_key_flow);
568
569 ovs_mutex_lock(&dev->mutex);
570 dev->tnl_cfg = tnl_cfg;
571 tunnel_check_status_change__(dev);
572 netdev_change_seq_changed(dev_);
573 ovs_mutex_unlock(&dev->mutex);
574
575 return 0;
576 }
577
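/* Reports the current tunnel configuration of 'dev' into 'args', the inverse
 * of set_tunnel_config(); settings that are still at their defaults are
 * omitted. */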
578 static int
579 get_tunnel_config(const struct netdev *dev, struct smap *args)
580 {
581 struct netdev_vport *netdev = netdev_vport_cast(dev);
582 struct netdev_tunnel_config tnl_cfg;
583
584 ovs_mutex_lock(&netdev->mutex);
585 tnl_cfg = netdev->tnl_cfg;
586 ovs_mutex_unlock(&netdev->mutex);
587
588 if (tnl_cfg.ip_dst) {
589 smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
590 } else if (tnl_cfg.ip_dst_flow) {
591 smap_add(args, "remote_ip", "flow");
592 }
593
594 if (tnl_cfg.ip_src) {
595 smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
596 } else if (tnl_cfg.ip_src_flow) {
597 smap_add(args, "local_ip", "flow");
598 }
599
600 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
601 smap_add(args, "key", "flow");
602 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
603 && tnl_cfg.in_key == tnl_cfg.out_key) {
604 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
605 } else {
606 if (tnl_cfg.in_key_flow) {
607 smap_add(args, "in_key", "flow");
608 } else if (tnl_cfg.in_key_present) {
609 smap_add_format(args, "in_key", "%"PRIu64,
610 ntohll(tnl_cfg.in_key));
611 }
612
613 if (tnl_cfg.out_key_flow) {
614 smap_add(args, "out_key", "flow");
615 } else if (tnl_cfg.out_key_present) {
616 smap_add_format(args, "out_key", "%"PRIu64,
617 ntohll(tnl_cfg.out_key));
618 }
619 }
620
621 if (tnl_cfg.ttl_inherit) {
622 smap_add(args, "ttl", "inherit");
623 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
624 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
625 }
626
627 if (tnl_cfg.tos_inherit) {
628 smap_add(args, "tos", "inherit");
629 } else if (tnl_cfg.tos) {
630 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
631 }
632
633 if (tnl_cfg.dst_port) {
634 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
635 const char *type = netdev_get_type(dev);
636
637 if ((!strcmp("geneve", type) && dst_port != GENEVE_DST_PORT) ||
638 (!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
639 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT)) {
640 smap_add_format(args, "dst_port", "%d", dst_port);
641 }
642 }
643
644 if (tnl_cfg.csum) {
645 smap_add(args, "csum", "true");
646 }
647
648 if (!tnl_cfg.dont_fragment) {
649 smap_add(args, "df_default", "false");
650 }
651
652 return 0;
653 }
654 \f
655 /* Code specific to patch ports. */
656
657 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
658 * string that the caller must free.
659 *
660 * If 'netdev' is not a patch port, returns NULL. */
661 char *
662 netdev_vport_patch_peer(const struct netdev *netdev_)
663 {
664 char *peer = NULL;
665
666 if (netdev_vport_is_patch(netdev_)) {
667 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
668
669 ovs_mutex_lock(&netdev->mutex);
670 if (netdev->peer) {
671 peer = xstrdup(netdev->peer);
672 }
673 ovs_mutex_unlock(&netdev->mutex);
674 }
675
676 return peer;
677 }
678
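/* netdev_vport_inc_rx() and netdev_vport_inc_tx() credit the packet and byte
 * counts in 'stats' to the vport's receive and transmit statistics,
 * respectively; they are no-ops for non-vport netdevs. */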
679 void
680 netdev_vport_inc_rx(const struct netdev *netdev,
681 const struct dpif_flow_stats *stats)
682 {
683 if (is_vport_class(netdev_get_class(netdev))) {
684 struct netdev_vport *dev = netdev_vport_cast(netdev);
685
686 ovs_mutex_lock(&dev->mutex);
687 dev->stats.rx_packets += stats->n_packets;
688 dev->stats.rx_bytes += stats->n_bytes;
689 ovs_mutex_unlock(&dev->mutex);
690 }
691 }
692
693 void
694 netdev_vport_inc_tx(const struct netdev *netdev,
695 const struct dpif_flow_stats *stats)
696 {
697 if (is_vport_class(netdev_get_class(netdev))) {
698 struct netdev_vport *dev = netdev_vport_cast(netdev);
699
700 ovs_mutex_lock(&dev->mutex);
701 dev->stats.tx_packets += stats->n_packets;
702 dev->stats.tx_bytes += stats->n_bytes;
703 ovs_mutex_unlock(&dev->mutex);
704 }
705 }
706
707 static int
708 get_patch_config(const struct netdev *dev_, struct smap *args)
709 {
710 struct netdev_vport *dev = netdev_vport_cast(dev_);
711
712 ovs_mutex_lock(&dev->mutex);
713 if (dev->peer) {
714 smap_add(args, "peer", dev->peer);
715 }
716 ovs_mutex_unlock(&dev->mutex);
717
718 return 0;
719 }
720
721 static int
722 set_patch_config(struct netdev *dev_, const struct smap *args)
723 {
724 struct netdev_vport *dev = netdev_vport_cast(dev_);
725 const char *name = netdev_get_name(dev_);
726 const char *peer;
727
728 peer = smap_get(args, "peer");
729 if (!peer) {
730 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
731 return EINVAL;
732 }
733
734 if (smap_count(args) > 1) {
735 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
736 return EINVAL;
737 }
738
739 if (!strcmp(name, peer)) {
740 VLOG_ERR("%s: patch peer must not be self", name);
741 return EINVAL;
742 }
743
744 ovs_mutex_lock(&dev->mutex);
745 free(dev->peer);
746 dev->peer = xstrdup(peer);
747 netdev_change_seq_changed(dev_);
748 ovs_mutex_unlock(&dev->mutex);
749
750 return 0;
751 }
752
753 static int
754 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
755 {
756 struct netdev_vport *dev = netdev_vport_cast(netdev);
757
758 ovs_mutex_lock(&dev->mutex);
759 *stats = dev->stats;
760 ovs_mutex_unlock(&dev->mutex);
761
762 return 0;
763 }
764 \f
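/* Expands to the common function-pointer members shared by every vport-based
 * netdev_class, in the member order declared in netdev-provider.h.  NULL
 * entries are operations that vports do not implement. */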
765 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
766 GET_TUNNEL_CONFIG, GET_STATUS) \
767 NULL, \
768 netdev_vport_run, \
769 netdev_vport_wait, \
770 \
771 netdev_vport_alloc, \
772 netdev_vport_construct, \
773 netdev_vport_destruct, \
774 netdev_vport_dealloc, \
775 GET_CONFIG, \
776 SET_CONFIG, \
777 GET_TUNNEL_CONFIG, \
778 NULL, /* get_numa_id */ \
779 NULL, /* set_multiq */ \
780 \
781 NULL, /* send */ \
782 NULL, /* send_wait */ \
783 \
784 netdev_vport_set_etheraddr, \
785 netdev_vport_get_etheraddr, \
786 NULL, /* get_mtu */ \
787 NULL, /* set_mtu */ \
788 NULL, /* get_ifindex */ \
789 NULL, /* get_carrier */ \
790 NULL, /* get_carrier_resets */ \
791 NULL, /* get_miimon */ \
792 get_stats, \
793 \
794 NULL, /* get_features */ \
795 NULL, /* set_advertisements */ \
796 \
797 NULL, /* set_policing */ \
798 NULL, /* get_qos_types */ \
799 NULL, /* get_qos_capabilities */ \
800 NULL, /* get_qos */ \
801 NULL, /* set_qos */ \
802 NULL, /* get_queue */ \
803 NULL, /* set_queue */ \
804 NULL, /* delete_queue */ \
805 NULL, /* get_queue_stats */ \
806 NULL, /* queue_dump_start */ \
807 NULL, /* queue_dump_next */ \
808 NULL, /* queue_dump_done */ \
809 NULL, /* dump_queue_stats */ \
810 \
811 NULL, /* get_in4 */ \
812 NULL, /* set_in4 */ \
813 NULL, /* get_in6 */ \
814 NULL, /* add_router */ \
815 NULL, /* get_next_hop */ \
816 GET_STATUS, \
817 NULL, /* arp_lookup */ \
818 \
819 netdev_vport_update_flags, \
820 \
821 NULL, /* rx_alloc */ \
822 NULL, /* rx_construct */ \
823 NULL, /* rx_destruct */ \
824 NULL, /* rx_dealloc */ \
825 NULL, /* rx_recv */ \
826 NULL, /* rx_wait */ \
827 NULL, /* rx_drain */
828
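/* Initializes a 'struct vport_class' that pairs the datapath port name prefix
 * DPIF_PORT with a tunnel netdev class named NAME, wired up to the
 * tunnel-specific configuration and status callbacks above. */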
829 #define TUNNEL_CLASS(NAME, DPIF_PORT) \
830 { DPIF_PORT, \
831 { NAME, VPORT_FUNCTIONS(get_tunnel_config, \
832 set_tunnel_config, \
833 get_netdev_tunnel_config, \
834 tunnel_get_status) }}
835
836 void
837 netdev_vport_tunnel_register(void)
838 {
839 /* The name of the dpif_port should be short enough to accommodate adding
840 * a port number to the end if one is necessary. */
841 static const struct vport_class vport_classes[] = {
842 TUNNEL_CLASS("geneve", "genev_sys"),
843 TUNNEL_CLASS("gre", "gre_sys"),
844 TUNNEL_CLASS("ipsec_gre", "gre_sys"),
845 TUNNEL_CLASS("gre64", "gre64_sys"),
846 TUNNEL_CLASS("ipsec_gre64", "gre64_sys"),
847 TUNNEL_CLASS("vxlan", "vxlan_sys"),
848 TUNNEL_CLASS("lisp", "lisp_sys")
849 };
850 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
851
852 if (ovsthread_once_start(&once)) {
853 int i;
854
855 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
856 netdev_register_provider(&vport_classes[i].netdev_class);
857 }
858 ovsthread_once_done(&once);
859 }
860 }
861
862 void
863 netdev_vport_patch_register(void)
864 {
865 static const struct vport_class patch_class =
866 { NULL,
867 { "patch", VPORT_FUNCTIONS(get_patch_config,
868 set_patch_config,
869 NULL,
870 NULL) }};
871 netdev_register_provider(&patch_class.netdev_class);
872 }