1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 * Copyright (c) 2016 Red Hat, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <config.h>
19
20 #include "netdev-vport.h"
21
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <sys/socket.h>
25 #include <net/if.h>
26 #include <netinet/in.h>
27 #include <netinet/ip6.h>
28 #include <sys/ioctl.h>
29
30 #include "byte-order.h"
31 #include "daemon.h"
32 #include "dirs.h"
33 #include "dpif.h"
34 #include "netdev.h"
35 #include "netdev-native-tnl.h"
36 #include "netdev-provider.h"
37 #include "netdev-vport-private.h"
38 #include "ovs-router.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "smap.h"
43 #include "socket-util.h"
44 #include "unaligned.h"
45 #include "unixctl.h"
46 #include "openvswitch/vlog.h"
47
48 VLOG_DEFINE_THIS_MODULE(netdev_vport);
49
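/* Default tunnel destination ports, applied when the configuration does not
 * specify one.  These match the well-known (mostly IANA-assigned) ports for
 * each encapsulation. */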
50 #define GENEVE_DST_PORT 6081
51 #define VXLAN_DST_PORT 4789
52 #define LISP_DST_PORT 4341
53 #define STT_DST_PORT 7471
54
55 #define DEFAULT_TTL 64
56
57 /* Last read of the route-table's change number. */
58 static uint64_t rt_change_seqno;
59
60 static int get_patch_config(const struct netdev *netdev, struct smap *args);
61 static int get_tunnel_config(const struct netdev *, struct smap *args);
62 static bool tunnel_check_status_change__(struct netdev_vport *);
63
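/* A vport netdev class: couples the datapath port name prefix ('dpif_port',
 * NULL for classes such as "patch" that have no datapath port) with the
 * struct netdev_class that implements the type. */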
64 struct vport_class {
65 const char *dpif_port;
66 struct netdev_class netdev_class;
67 };
68
69 bool
70 netdev_vport_is_vport_class(const struct netdev_class *class)
71 {
72 return is_vport_class(class);
73 }
74
75 static const struct vport_class *
76 vport_class_cast(const struct netdev_class *class)
77 {
78 ovs_assert(is_vport_class(class));
79 return CONTAINER_OF(class, struct vport_class, netdev_class);
80 }
81
82 static const struct netdev_tunnel_config *
83 get_netdev_tunnel_config(const struct netdev *netdev)
84 {
85 return &netdev_vport_cast(netdev)->tnl_cfg;
86 }
87
88 bool
89 netdev_vport_is_patch(const struct netdev *netdev)
90 {
91 const struct netdev_class *class = netdev_get_class(netdev);
92
93 return class->get_config == get_patch_config;
94 }
95
96 bool
97 netdev_vport_is_layer3(const struct netdev *dev)
98 {
99 const char *type = netdev_get_type(dev);
100
101 return (!strcmp("lisp", type));
102 }
103
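/* Returns true if 'dev' is a tunnel type that carries a configurable
 * destination port (geneve, vxlan, lisp, or stt). */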
104 static bool
105 netdev_vport_needs_dst_port(const struct netdev *dev)
106 {
107 const struct netdev_class *class = netdev_get_class(dev);
108 const char *type = netdev_get_type(dev);
109
110 return (class->get_config == get_tunnel_config &&
111 (!strcmp("geneve", type) || !strcmp("vxlan", type) ||
112 !strcmp("lisp", type) || !strcmp("stt", type)) );
113 }
114
115 const char *
116 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
117 {
118 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
119 }
120
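/* Returns the datapath port name to use for 'netdev'.  For tunnel types with
 * a destination port, the port number is appended to the class's dpif_port
 * prefix and formatted into 'namebuf'; e.g. a "vxlan" netdev left at the
 * default destination port maps to "vxlan_sys_4789".  Classes without a
 * dpif_port (such as "patch") simply use the netdev's own name. */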
121 const char *
122 netdev_vport_get_dpif_port(const struct netdev *netdev,
123 char namebuf[], size_t bufsize)
124 {
125 const struct netdev_class *class = netdev_get_class(netdev);
126 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
127
128 if (!dpif_port) {
129 return netdev_get_name(netdev);
130 }
131
132 if (netdev_vport_needs_dst_port(netdev)) {
133 const struct netdev_vport *vport = netdev_vport_cast(netdev);
134
135 /*
136          * Note: IFNAMSIZ is 16 bytes long.  Implementations should choose
137          * a dpif port name that is short enough to fit, including any
138          * port number suffix, but assert just in case.
139 */
140 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
141 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
142 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
143 ntohs(vport->tnl_cfg.dst_port));
144 return namebuf;
145 } else {
146 return dpif_port;
147 }
148 }
149
150 /* Whenever the route-table change number is incremented,
151 * netdev_vport_route_changed() should be called to update
152 * the corresponding tunnel interface status. */
153 static void
154 netdev_vport_route_changed(void)
155 {
156 struct netdev **vports;
157 size_t i, n_vports;
158
159 vports = netdev_get_vports(&n_vports);
160 for (i = 0; i < n_vports; i++) {
161 struct netdev *netdev_ = vports[i];
162 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
163
164 ovs_mutex_lock(&netdev->mutex);
165         /* Only consider vports with a tunnel destination configured. */
166 if (ipv6_addr_is_set(&netdev->tnl_cfg.ipv6_dst)) {
167 if (tunnel_check_status_change__(netdev)) {
168 netdev_change_seq_changed(netdev_);
169 }
170 }
171 ovs_mutex_unlock(&netdev->mutex);
172
173 netdev_close(netdev_);
174 }
175
176 free(vports);
177 }
178
179 static struct netdev *
180 netdev_vport_alloc(void)
181 {
182 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
183 return &netdev->up;
184 }
185
186 int
187 netdev_vport_construct(struct netdev *netdev_)
188 {
189 struct netdev_vport *dev = netdev_vport_cast(netdev_);
190 const char *type = netdev_get_type(netdev_);
191
192 ovs_mutex_init(&dev->mutex);
193 eth_addr_random(&dev->etheraddr);
194
195 /* Add a default destination port for tunnel ports if none specified. */
196 if (!strcmp(type, "geneve")) {
197 dev->tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
198 } else if (!strcmp(type, "vxlan")) {
199 dev->tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
200 } else if (!strcmp(type, "lisp")) {
201 dev->tnl_cfg.dst_port = htons(LISP_DST_PORT);
202 } else if (!strcmp(type, "stt")) {
203 dev->tnl_cfg.dst_port = htons(STT_DST_PORT);
204 }
205
206 dev->tnl_cfg.dont_fragment = true;
207 dev->tnl_cfg.ttl = DEFAULT_TTL;
208 return 0;
209 }
210
211 static void
212 netdev_vport_destruct(struct netdev *netdev_)
213 {
214 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
215
216 free(netdev->peer);
217 ovs_mutex_destroy(&netdev->mutex);
218 }
219
220 static void
221 netdev_vport_dealloc(struct netdev *netdev_)
222 {
223 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
224 free(netdev);
225 }
226
227 static int
228 netdev_vport_set_etheraddr(struct netdev *netdev_, const struct eth_addr mac)
229 {
230 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
231
232 ovs_mutex_lock(&netdev->mutex);
233 netdev->etheraddr = mac;
234 ovs_mutex_unlock(&netdev->mutex);
235 netdev_change_seq_changed(netdev_);
236
237 return 0;
238 }
239
240 static int
241 netdev_vport_get_etheraddr(const struct netdev *netdev_, struct eth_addr *mac)
242 {
243 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
244
245 ovs_mutex_lock(&netdev->mutex);
246 *mac = netdev->etheraddr;
247 ovs_mutex_unlock(&netdev->mutex);
248
249 return 0;
250 }
251
252 /* Checks whether the tunnel's egress interface or carrier status has
253  * changed, updating the cached values if so.  Returns true on change. */
254 static bool
255 tunnel_check_status_change__(struct netdev_vport *netdev)
256 OVS_REQUIRES(netdev->mutex)
257 {
258 char iface[IFNAMSIZ];
259 bool status = false;
260 struct in6_addr *route;
261 struct in6_addr gw;
262
263 iface[0] = '\0';
264 route = &netdev->tnl_cfg.ipv6_dst;
265 if (ovs_router_lookup(route, iface, NULL, &gw)) {
266 struct netdev *egress_netdev;
267
268 if (!netdev_open(iface, NULL, &egress_netdev)) {
269 status = netdev_get_carrier(egress_netdev);
270 netdev_close(egress_netdev);
271 }
272 }
273
274 if (strcmp(netdev->egress_iface, iface)
275 || netdev->carrier_status != status) {
276 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
277 netdev->carrier_status = status;
278
279 return true;
280 }
281
282 return false;
283 }
284
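/* get_status() implementation for tunnels: reports the cached egress
 * interface and its carrier state ("up"/"down") into 'smap'. */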
285 static int
286 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
287 {
288 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
289
290 if (netdev->egress_iface[0]) {
291 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
292
293 smap_add(smap, "tunnel_egress_iface_carrier",
294 netdev->carrier_status ? "up" : "down");
295 }
296
297 return 0;
298 }
299
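/* Vports are always reported as up and promiscuous; attempts to clear either
 * flag are rejected with EOPNOTSUPP. */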
300 static int
301 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
302 enum netdev_flags off,
303 enum netdev_flags on OVS_UNUSED,
304 enum netdev_flags *old_flagsp)
305 {
306 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
307 return EOPNOTSUPP;
308 }
309
310 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
311 return 0;
312 }
313
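/* run()/wait() hooks shared by all vport classes: poll the system route
 * table and, whenever its change sequence number moves, re-check the status
 * of every tunnel vport via netdev_vport_route_changed(). */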
314 static void
315 netdev_vport_run(void)
316 {
317 uint64_t seq;
318
319 route_table_run();
320 seq = route_table_get_change_seq();
321 if (rt_change_seqno != seq) {
322 rt_change_seqno = seq;
323 netdev_vport_route_changed();
324 }
325 }
326
327 static void
328 netdev_vport_wait(void)
329 {
330 uint64_t seq;
331
332 route_table_wait();
333 seq = route_table_get_change_seq();
334 if (rt_change_seqno != seq) {
335 poll_immediate_wake();
336 }
337 }
338 \f
339 /* Code specific to tunnel types. */
340
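/* Parses the tunnel key option 'name' ("in_key" or "out_key") from 'args',
 * falling back to the generic "key" option.  Sets '*present' if any value
 * was supplied and '*flow' if that value is the literal "flow"; otherwise
 * the key is returned in network byte order. */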
341 static ovs_be64
342 parse_key(const struct smap *args, const char *name,
343 bool *present, bool *flow)
344 {
345 const char *s;
346
347 *present = false;
348 *flow = false;
349
350 s = smap_get(args, name);
351 if (!s) {
352 s = smap_get(args, "key");
353 if (!s) {
354 return 0;
355 }
356 }
357
358 *present = true;
359
360 if (!strcmp(s, "flow")) {
361 *flow = true;
362 return 0;
363 } else {
364 return htonll(strtoull(s, NULL, 0));
365 }
366 }
367
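/* Parses 'value' as a tunnel endpoint: the literal "flow", an IPv6 address,
 * or an IPv4 address (stored IPv4-mapped into '*ipv6').  Returns 0 on
 * success, ENOENT if the name cannot be resolved, or EINVAL for a multicast
 * address when 'accept_mcast' is false. */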
368 static int
369 parse_tunnel_ip(const char *value, bool accept_mcast, bool *flow,
370 struct in6_addr *ipv6, uint16_t *protocol)
371 {
372 if (!strcmp(value, "flow")) {
373 *flow = true;
374 *protocol = 0;
375 return 0;
376 }
377 if (addr_is_ipv6(value)) {
378 if (lookup_ipv6(value, ipv6)) {
379 return ENOENT;
380 }
381 if (!accept_mcast && ipv6_addr_is_multicast(ipv6)) {
382 return EINVAL;
383 }
384 *protocol = ETH_TYPE_IPV6;
385 } else {
386 struct in_addr ip;
387 if (lookup_ip(value, &ip)) {
388 return ENOENT;
389 }
390 if (!accept_mcast && ip_is_multicast(ip.s_addr)) {
391 return EINVAL;
392 }
393 in6_addr_set_mapped_ipv4(ipv6, ip.s_addr);
394 *protocol = ETH_TYPE_IP;
395 }
396 return 0;
397 }
398
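/* Applies the tunnel options in 'args' -- typically the Interface table's
 * options column, e.g. options:remote_ip=192.168.1.1 or options:key=flow --
 * to 'dev_': builds a fresh netdev_tunnel_config, validates it, and installs
 * it under the device mutex.  Unrecognized options only draw a warning;
 * invalid combinations return EINVAL. */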
399 static int
400 set_tunnel_config(struct netdev *dev_, const struct smap *args)
401 {
402 struct netdev_vport *dev = netdev_vport_cast(dev_);
403 const char *name = netdev_get_name(dev_);
404 const char *type = netdev_get_type(dev_);
405 bool ipsec_mech_set, needs_dst_port, has_csum;
406 uint16_t dst_proto = 0, src_proto = 0;
407 struct netdev_tunnel_config tnl_cfg;
408 struct smap_node *node;
409
410 has_csum = strstr(type, "gre") || strstr(type, "geneve") ||
411 strstr(type, "stt") || strstr(type, "vxlan");
412 ipsec_mech_set = false;
413 memset(&tnl_cfg, 0, sizeof tnl_cfg);
414
415 /* Add a default destination port for tunnel ports if none specified. */
416 if (!strcmp(type, "geneve")) {
417 tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
418 }
419
420 if (!strcmp(type, "vxlan")) {
421 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
422 }
423
424 if (!strcmp(type, "lisp")) {
425 tnl_cfg.dst_port = htons(LISP_DST_PORT);
426 }
427
428 if (!strcmp(type, "stt")) {
429 tnl_cfg.dst_port = htons(STT_DST_PORT);
430 }
431
432 needs_dst_port = netdev_vport_needs_dst_port(dev_);
433 tnl_cfg.ipsec = strstr(type, "ipsec");
434 tnl_cfg.dont_fragment = true;
435
436 SMAP_FOR_EACH (node, args) {
437 if (!strcmp(node->key, "remote_ip")) {
438 int err;
439 err = parse_tunnel_ip(node->value, false, &tnl_cfg.ip_dst_flow,
440 &tnl_cfg.ipv6_dst, &dst_proto);
441 switch (err) {
442 case ENOENT:
443 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
444 break;
445 case EINVAL:
446 VLOG_WARN("%s: multicast remote_ip=%s not allowed",
447 name, node->value);
448 return EINVAL;
449 }
450 } else if (!strcmp(node->key, "local_ip")) {
451 int err;
452 err = parse_tunnel_ip(node->value, true, &tnl_cfg.ip_src_flow,
453 &tnl_cfg.ipv6_src, &src_proto);
454 switch (err) {
455 case ENOENT:
456 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
457 break;
458 }
459 } else if (!strcmp(node->key, "tos")) {
460 if (!strcmp(node->value, "inherit")) {
461 tnl_cfg.tos_inherit = true;
462 } else {
463 char *endptr;
464 int tos;
465 tos = strtol(node->value, &endptr, 0);
466 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
467 tnl_cfg.tos = tos;
468 } else {
469 VLOG_WARN("%s: invalid TOS %s", name, node->value);
470 }
471 }
472 } else if (!strcmp(node->key, "ttl")) {
473 if (!strcmp(node->value, "inherit")) {
474 tnl_cfg.ttl_inherit = true;
475 } else {
476 tnl_cfg.ttl = atoi(node->value);
477 }
478 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
479 tnl_cfg.dst_port = htons(atoi(node->value));
480 } else if (!strcmp(node->key, "csum") && has_csum) {
481 if (!strcmp(node->value, "true")) {
482 tnl_cfg.csum = true;
483 }
484 } else if (!strcmp(node->key, "df_default")) {
485 if (!strcmp(node->value, "false")) {
486 tnl_cfg.dont_fragment = false;
487 }
488 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
489 if (smap_get(args, "certificate")) {
490 ipsec_mech_set = true;
491 } else {
492 const char *use_ssl_cert;
493
494             /* If "use_ssl_cert" is true, then "certificate" and
495              * "private_key" will be pulled from the SSL table.  The
496              * use of this option is strongly discouraged, since it
497              * will likely be removed when multiple SSL configurations
498 * are supported by OVS.
499 */
500 use_ssl_cert = smap_get(args, "use_ssl_cert");
501 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
502 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
503 name);
504 return EINVAL;
505 }
506 ipsec_mech_set = true;
507 }
508 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
509 ipsec_mech_set = true;
510 } else if (tnl_cfg.ipsec
511 && (!strcmp(node->key, "certificate")
512 || !strcmp(node->key, "private_key")
513 || !strcmp(node->key, "use_ssl_cert"))) {
514 /* Ignore options not used by the netdev. */
515 } else if (!strcmp(node->key, "key") ||
516 !strcmp(node->key, "in_key") ||
517 !strcmp(node->key, "out_key")) {
518 /* Handled separately below. */
519 } else if (!strcmp(node->key, "exts")) {
520 char *str = xstrdup(node->value);
521 char *ext, *save_ptr = NULL;
522
523 tnl_cfg.exts = 0;
524
525 ext = strtok_r(str, ",", &save_ptr);
526 while (ext) {
527 if (!strcmp(type, "vxlan") && !strcmp(ext, "gbp")) {
528 tnl_cfg.exts |= (1 << OVS_VXLAN_EXT_GBP);
529 } else {
530 VLOG_WARN("%s: unknown extension '%s'", name, ext);
531 }
532
533 ext = strtok_r(NULL, ",", &save_ptr);
534 }
535
536 free(str);
537 } else {
538 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
539 }
540 }
541
542 if (tnl_cfg.ipsec) {
543 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
544 static pid_t pid = 0;
545
546 #ifndef _WIN32
547 ovs_mutex_lock(&mutex);
548 if (pid <= 0) {
549 char *file_name = xasprintf("%s/%s", ovs_rundir(),
550 "ovs-monitor-ipsec.pid");
551 pid = read_pidfile(file_name);
552 free(file_name);
553 }
554 ovs_mutex_unlock(&mutex);
555 #endif
556
557 if (pid < 0) {
558 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
559 name);
560 return EINVAL;
561 }
562
563 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
564 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
565 return EINVAL;
566 }
567
568 if (!ipsec_mech_set) {
569             VLOG_ERR("%s: IPsec requires a 'peer_cert' or 'psk' argument",
570 name);
571 return EINVAL;
572 }
573 }
574
575 if (!ipv6_addr_is_set(&tnl_cfg.ipv6_dst) && !tnl_cfg.ip_dst_flow) {
576 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
577 name, type);
578 return EINVAL;
579 }
580 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
581 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
582 name, type);
583 return EINVAL;
584 }
585 if (src_proto && dst_proto && src_proto != dst_proto) {
586         VLOG_ERR("%s: 'remote_ip' and 'local_ip' must be in the same address family",
587 name);
588 return EINVAL;
589 }
590 if (!tnl_cfg.ttl) {
591 tnl_cfg.ttl = DEFAULT_TTL;
592 }
593
594 tnl_cfg.in_key = parse_key(args, "in_key",
595 &tnl_cfg.in_key_present,
596 &tnl_cfg.in_key_flow);
597
598 tnl_cfg.out_key = parse_key(args, "out_key",
599 &tnl_cfg.out_key_present,
600 &tnl_cfg.out_key_flow);
601
602 ovs_mutex_lock(&dev->mutex);
603 if (memcmp(&dev->tnl_cfg, &tnl_cfg, sizeof tnl_cfg)) {
604 dev->tnl_cfg = tnl_cfg;
605 tunnel_check_status_change__(dev);
606 netdev_change_seq_changed(dev_);
607 }
608 ovs_mutex_unlock(&dev->mutex);
609
610 return 0;
611 }
612
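/* Inverse of set_tunnel_config(): serializes the current tunnel
 * configuration back into 'args', omitting values that are still at their
 * defaults (e.g. the well-known dst_port or a TTL of 64). */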
613 static int
614 get_tunnel_config(const struct netdev *dev, struct smap *args)
615 {
616 struct netdev_vport *netdev = netdev_vport_cast(dev);
617 struct netdev_tunnel_config tnl_cfg;
618
619 ovs_mutex_lock(&netdev->mutex);
620 tnl_cfg = netdev->tnl_cfg;
621 ovs_mutex_unlock(&netdev->mutex);
622
623 if (ipv6_addr_is_set(&tnl_cfg.ipv6_dst)) {
624 smap_add_ipv6(args, "remote_ip", &tnl_cfg.ipv6_dst);
625 } else if (tnl_cfg.ip_dst_flow) {
626 smap_add(args, "remote_ip", "flow");
627 }
628
629 if (ipv6_addr_is_set(&tnl_cfg.ipv6_src)) {
630 smap_add_ipv6(args, "local_ip", &tnl_cfg.ipv6_src);
631 } else if (tnl_cfg.ip_src_flow) {
632 smap_add(args, "local_ip", "flow");
633 }
634
635 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
636 smap_add(args, "key", "flow");
637 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
638 && tnl_cfg.in_key == tnl_cfg.out_key) {
639 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
640 } else {
641 if (tnl_cfg.in_key_flow) {
642 smap_add(args, "in_key", "flow");
643 } else if (tnl_cfg.in_key_present) {
644 smap_add_format(args, "in_key", "%"PRIu64,
645 ntohll(tnl_cfg.in_key));
646 }
647
648 if (tnl_cfg.out_key_flow) {
649 smap_add(args, "out_key", "flow");
650 } else if (tnl_cfg.out_key_present) {
651 smap_add_format(args, "out_key", "%"PRIu64,
652 ntohll(tnl_cfg.out_key));
653 }
654 }
655
656 if (tnl_cfg.ttl_inherit) {
657 smap_add(args, "ttl", "inherit");
658 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
659 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
660 }
661
662 if (tnl_cfg.tos_inherit) {
663 smap_add(args, "tos", "inherit");
664 } else if (tnl_cfg.tos) {
665 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
666 }
667
668 if (tnl_cfg.dst_port) {
669 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
670 const char *type = netdev_get_type(dev);
671
672 if ((!strcmp("geneve", type) && dst_port != GENEVE_DST_PORT) ||
673 (!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
674 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT) ||
675 (!strcmp("stt", type) && dst_port != STT_DST_PORT)) {
676 smap_add_format(args, "dst_port", "%d", dst_port);
677 }
678 }
679
680 if (tnl_cfg.csum) {
681 smap_add(args, "csum", "true");
682 }
683
684 if (!tnl_cfg.dont_fragment) {
685 smap_add(args, "df_default", "false");
686 }
687
688 return 0;
689 }
690 \f
691 /* Code specific to patch ports. */
692
693 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
694 * string that the caller must free.
695 *
696 * If 'netdev' is not a patch port, returns NULL. */
697 char *
698 netdev_vport_patch_peer(const struct netdev *netdev_)
699 {
700 char *peer = NULL;
701
702 if (netdev_vport_is_patch(netdev_)) {
703 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
704
705 ovs_mutex_lock(&netdev->mutex);
706 if (netdev->peer) {
707 peer = xstrdup(netdev->peer);
708 }
709 ovs_mutex_unlock(&netdev->mutex);
710 }
711
712 return peer;
713 }
714
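/* Credits the packet and byte counts from 'stats' to the vport's rx
 * counters; netdev_vport_inc_tx() below does the same for tx.  These totals
 * are what get_stats() later reports. */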
715 void
716 netdev_vport_inc_rx(const struct netdev *netdev,
717 const struct dpif_flow_stats *stats)
718 {
719 if (is_vport_class(netdev_get_class(netdev))) {
720 struct netdev_vport *dev = netdev_vport_cast(netdev);
721
722 ovs_mutex_lock(&dev->mutex);
723 dev->stats.rx_packets += stats->n_packets;
724 dev->stats.rx_bytes += stats->n_bytes;
725 ovs_mutex_unlock(&dev->mutex);
726 }
727 }
728
729 void
730 netdev_vport_inc_tx(const struct netdev *netdev,
731 const struct dpif_flow_stats *stats)
732 {
733 if (is_vport_class(netdev_get_class(netdev))) {
734 struct netdev_vport *dev = netdev_vport_cast(netdev);
735
736 ovs_mutex_lock(&dev->mutex);
737 dev->stats.tx_packets += stats->n_packets;
738 dev->stats.tx_bytes += stats->n_bytes;
739 ovs_mutex_unlock(&dev->mutex);
740 }
741 }
742
743 static int
744 get_patch_config(const struct netdev *dev_, struct smap *args)
745 {
746 struct netdev_vport *dev = netdev_vport_cast(dev_);
747
748 ovs_mutex_lock(&dev->mutex);
749 if (dev->peer) {
750 smap_add(args, "peer", dev->peer);
751 }
752 ovs_mutex_unlock(&dev->mutex);
753
754 return 0;
755 }
756
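/* A patch port takes exactly one option, 'peer', which must be present and
 * must not name the port itself. */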
757 static int
758 set_patch_config(struct netdev *dev_, const struct smap *args)
759 {
760 struct netdev_vport *dev = netdev_vport_cast(dev_);
761 const char *name = netdev_get_name(dev_);
762 const char *peer;
763
764 peer = smap_get(args, "peer");
765 if (!peer) {
766 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
767 return EINVAL;
768 }
769
770 if (smap_count(args) > 1) {
771 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
772 return EINVAL;
773 }
774
775 if (!strcmp(name, peer)) {
776 VLOG_ERR("%s: patch peer must not be self", name);
777 return EINVAL;
778 }
779
780 ovs_mutex_lock(&dev->mutex);
781 if (!dev->peer || strcmp(dev->peer, peer)) {
782 free(dev->peer);
783 dev->peer = xstrdup(peer);
784 netdev_change_seq_changed(dev_);
785 }
786 ovs_mutex_unlock(&dev->mutex);
787
788 return 0;
789 }
790
791 static int
792 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
793 {
794 struct netdev_vport *dev = netdev_vport_cast(netdev);
795
796 ovs_mutex_lock(&dev->mutex);
797     /* Report only the counters that vport netdevs maintain. */
798 stats->tx_packets = dev->stats.tx_packets;
799 stats->tx_bytes = dev->stats.tx_bytes;
800 stats->rx_packets = dev->stats.rx_packets;
801 stats->rx_bytes = dev->stats.rx_bytes;
802 ovs_mutex_unlock(&dev->mutex);
803
804 return 0;
805 }
806
807 \f
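/* Expands to the shared function-pointer members of a struct netdev_class
 * for vport-backed netdevs, in declaration order; operations that vports do
 * not support are left NULL.  Each class plugs in its own configuration,
 * tunnel-config, status, and header build/push/pop hooks. */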
808 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
809 GET_TUNNEL_CONFIG, GET_STATUS, \
810 BUILD_HEADER, \
811 PUSH_HEADER, POP_HEADER) \
812 NULL, \
813 netdev_vport_run, \
814 netdev_vport_wait, \
815 \
816 netdev_vport_alloc, \
817 netdev_vport_construct, \
818 netdev_vport_destruct, \
819 netdev_vport_dealloc, \
820 GET_CONFIG, \
821 SET_CONFIG, \
822 GET_TUNNEL_CONFIG, \
823 BUILD_HEADER, \
824 PUSH_HEADER, \
825 POP_HEADER, \
826 NULL, /* get_numa_id */ \
827 NULL, /* set_tx_multiq */ \
828 \
829 NULL, /* send */ \
830 NULL, /* send_wait */ \
831 \
832 netdev_vport_set_etheraddr, \
833 netdev_vport_get_etheraddr, \
834 NULL, /* get_mtu */ \
835 NULL, /* set_mtu */ \
836 NULL, /* get_ifindex */ \
837 NULL, /* get_carrier */ \
838 NULL, /* get_carrier_resets */ \
839 NULL, /* get_miimon */ \
840 get_stats, \
841 \
842 NULL, /* get_features */ \
843 NULL, /* set_advertisements */ \
844 \
845 NULL, /* set_policing */ \
846 NULL, /* get_qos_types */ \
847 NULL, /* get_qos_capabilities */ \
848 NULL, /* get_qos */ \
849 NULL, /* set_qos */ \
850 NULL, /* get_queue */ \
851 NULL, /* set_queue */ \
852 NULL, /* delete_queue */ \
853 NULL, /* get_queue_stats */ \
854 NULL, /* queue_dump_start */ \
855 NULL, /* queue_dump_next */ \
856 NULL, /* queue_dump_done */ \
857 NULL, /* dump_queue_stats */ \
858 \
859 NULL, /* set_in4 */ \
860 NULL, /* get_addr_list */ \
861 NULL, /* add_router */ \
862 NULL, /* get_next_hop */ \
863 GET_STATUS, \
864 NULL, /* arp_lookup */ \
865 \
866 netdev_vport_update_flags, \
867 NULL, /* reconfigure */ \
868 \
869 NULL, /* rx_alloc */ \
870 NULL, /* rx_construct */ \
871 NULL, /* rx_destruct */ \
872 NULL, /* rx_dealloc */ \
873 NULL, /* rx_recv */ \
874 NULL, /* rx_wait */ \
875 NULL, /* rx_drain */
876
877
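/* Defines a struct vport_class for tunnel type NAME whose datapath ports are
 * named DPIF_PORT with a "_<dst_port>" suffix where applicable; for example,
 * the "geneve" entry below produces datapath ports such as "genev_sys_6081"
 * (see netdev_vport_get_dpif_port()). */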
878 #define TUNNEL_CLASS(NAME, DPIF_PORT, BUILD_HEADER, PUSH_HEADER, POP_HEADER) \
879 { DPIF_PORT, \
880 { NAME, false, \
881 VPORT_FUNCTIONS(get_tunnel_config, \
882 set_tunnel_config, \
883 get_netdev_tunnel_config, \
884 tunnel_get_status, \
885 BUILD_HEADER, PUSH_HEADER, POP_HEADER) }}
886
887 void
888 netdev_vport_tunnel_register(void)
889 {
890     /* The name of the dpif_port should be short enough to accommodate adding
891 * a port number to the end if one is necessary. */
892 static const struct vport_class vport_classes[] = {
893 TUNNEL_CLASS("geneve", "genev_sys", netdev_geneve_build_header,
894 netdev_tnl_push_udp_header,
895 netdev_geneve_pop_header),
896 TUNNEL_CLASS("gre", "gre_sys", netdev_gre_build_header,
897 netdev_gre_push_header,
898 netdev_gre_pop_header),
899 TUNNEL_CLASS("ipsec_gre", "gre_sys", NULL, NULL, NULL),
900 TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header,
901 netdev_tnl_push_udp_header,
902 netdev_vxlan_pop_header),
903 TUNNEL_CLASS("lisp", "lisp_sys", NULL, NULL, NULL),
904 TUNNEL_CLASS("stt", "stt_sys", NULL, NULL, NULL),
905 };
906 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
907
908 if (ovsthread_once_start(&once)) {
909 int i;
910
911 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
912 netdev_register_provider(&vport_classes[i].netdev_class);
913 }
914
915 unixctl_command_register("tnl/egress_port_range", "min max", 0, 2,
916 netdev_tnl_egress_port_range, NULL);
917
918 ovsthread_once_done(&once);
919 }
920 }
921
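/* Registers the "patch" netdev class.  Patch ports have no datapath port
 * name and no tunnel configuration, so only the get/set config hooks are
 * provided. */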
922 void
923 netdev_vport_patch_register(void)
924 {
925 static const struct vport_class patch_class =
926 { NULL,
927 { "patch", false,
928 VPORT_FUNCTIONS(get_patch_config,
929 set_patch_config,
930 NULL,
931 NULL, NULL, NULL, NULL) }};
932 netdev_register_provider(&patch_class.netdev_class);
933 }