/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2014, 2017 Nicira, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "netdev-vport.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <sys/ioctl.h>

#include "byte-order.h"
#include "netdev-native-tnl.h"
#include "netdev-provider.h"
#include "netdev-vport-private.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/vlog.h"
#include "ovs-router.h"
#include "ovs-thread.h"
#include "route-table.h"
#include "socket-util.h"
#include "unaligned.h"
#include "unixctl.h"
#include "util.h"
#include "netdev-tc-offloads.h"
#include "netdev-linux.h"
VLOG_DEFINE_THIS_MODULE(netdev_vport);
56 #define GENEVE_DST_PORT 6081
57 #define VXLAN_DST_PORT 4789
58 #define LISP_DST_PORT 4341
59 #define STT_DST_PORT 7471
61 #define DEFAULT_TTL 64
63 /* Last read of the route-table's change number. */
64 static uint64_t rt_change_seqno
;
66 static int get_patch_config(const struct netdev
*netdev
, struct smap
*args
);
67 static int get_tunnel_config(const struct netdev
*, struct smap
*args
);
68 static bool tunnel_check_status_change__(struct netdev_vport
*);
71 const char *dpif_port
;
72 struct netdev_class netdev_class
;
76 netdev_vport_is_vport_class(const struct netdev_class
*class)
78 return is_vport_class(class);
81 static const struct vport_class
*
82 vport_class_cast(const struct netdev_class
*class)
84 ovs_assert(is_vport_class(class));
85 return CONTAINER_OF(class, struct vport_class
, netdev_class
);
88 static const struct netdev_tunnel_config
*
89 get_netdev_tunnel_config(const struct netdev
*netdev
)
91 return &netdev_vport_cast(netdev
)->tnl_cfg
;
95 netdev_vport_is_patch(const struct netdev
*netdev
)
97 const struct netdev_class
*class = netdev_get_class(netdev
);
99 return class->get_config
== get_patch_config
;
103 netdev_vport_needs_dst_port(const struct netdev
*dev
)
105 const struct netdev_class
*class = netdev_get_class(dev
);
106 const char *type
= netdev_get_type(dev
);
108 return (class->get_config
== get_tunnel_config
&&
109 (!strcmp("geneve", type
) || !strcmp("vxlan", type
) ||
110 !strcmp("lisp", type
) || !strcmp("stt", type
)) );
114 netdev_vport_class_get_dpif_port(const struct netdev_class
*class)
116 return is_vport_class(class) ? vport_class_cast(class)->dpif_port
: NULL
;
120 netdev_vport_get_dpif_port(const struct netdev
*netdev
,
121 char namebuf
[], size_t bufsize
)
123 const struct netdev_class
*class = netdev_get_class(netdev
);
124 const char *dpif_port
= netdev_vport_class_get_dpif_port(class);
127 return netdev_get_name(netdev
);
130 if (netdev_vport_needs_dst_port(netdev
)) {
131 const struct netdev_vport
*vport
= netdev_vport_cast(netdev
);
134 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
135 * a dpif port name that is short enough to fit including any
136 * port numbers but assert just in case.
138 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE
>= IFNAMSIZ
);
139 ovs_assert(strlen(dpif_port
) + 6 < IFNAMSIZ
);
140 snprintf(namebuf
, bufsize
, "%s_%d", dpif_port
,
141 ntohs(vport
->tnl_cfg
.dst_port
));
148 /* Whenever the route-table change number is incremented,
149 * netdev_vport_route_changed() should be called to update
150 * the corresponding tunnel interface status. */
152 netdev_vport_route_changed(void)
154 struct netdev
**vports
;
157 vports
= netdev_get_vports(&n_vports
);
158 for (i
= 0; i
< n_vports
; i
++) {
159 struct netdev
*netdev_
= vports
[i
];
160 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
162 ovs_mutex_lock(&netdev
->mutex
);
163 /* Finds all tunnel vports. */
164 if (ipv6_addr_is_set(&netdev
->tnl_cfg
.ipv6_dst
)) {
165 if (tunnel_check_status_change__(netdev
)) {
166 netdev_change_seq_changed(netdev_
);
169 ovs_mutex_unlock(&netdev
->mutex
);
171 netdev_close(netdev_
);
177 static struct netdev
*
178 netdev_vport_alloc(void)
180 struct netdev_vport
*netdev
= xzalloc(sizeof *netdev
);
185 netdev_vport_construct(struct netdev
*netdev_
)
187 struct netdev_vport
*dev
= netdev_vport_cast(netdev_
);
188 const char *type
= netdev_get_type(netdev_
);
190 ovs_mutex_init(&dev
->mutex
);
191 eth_addr_random(&dev
->etheraddr
);
193 /* Add a default destination port for tunnel ports if none specified. */
194 if (!strcmp(type
, "geneve")) {
195 dev
->tnl_cfg
.dst_port
= htons(GENEVE_DST_PORT
);
196 } else if (!strcmp(type
, "vxlan")) {
197 dev
->tnl_cfg
.dst_port
= htons(VXLAN_DST_PORT
);
198 } else if (!strcmp(type
, "lisp")) {
199 dev
->tnl_cfg
.dst_port
= htons(LISP_DST_PORT
);
200 } else if (!strcmp(type
, "stt")) {
201 dev
->tnl_cfg
.dst_port
= htons(STT_DST_PORT
);
204 dev
->tnl_cfg
.dont_fragment
= true;
205 dev
->tnl_cfg
.ttl
= DEFAULT_TTL
;
210 netdev_vport_destruct(struct netdev
*netdev_
)
212 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
215 ovs_mutex_destroy(&netdev
->mutex
);
/* netdev_class 'dealloc' callback: frees the netdev_vport allocated by
 * netdev_vport_alloc(). */
static void
netdev_vport_dealloc(struct netdev *netdev_)
{
    struct netdev_vport *netdev = netdev_vport_cast(netdev_);

    free(netdev);
}
226 netdev_vport_set_etheraddr(struct netdev
*netdev_
, const struct eth_addr mac
)
228 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
230 ovs_mutex_lock(&netdev
->mutex
);
231 netdev
->etheraddr
= mac
;
232 ovs_mutex_unlock(&netdev
->mutex
);
233 netdev_change_seq_changed(netdev_
);
239 netdev_vport_get_etheraddr(const struct netdev
*netdev_
, struct eth_addr
*mac
)
241 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
243 ovs_mutex_lock(&netdev
->mutex
);
244 *mac
= netdev
->etheraddr
;
245 ovs_mutex_unlock(&netdev
->mutex
);
250 /* Checks if the tunnel status has changed and returns a boolean.
251 * Updates the tunnel status if it has changed. */
253 tunnel_check_status_change__(struct netdev_vport
*netdev
)
254 OVS_REQUIRES(netdev
->mutex
)
256 char iface
[IFNAMSIZ
];
258 struct in6_addr
*route
;
263 route
= &netdev
->tnl_cfg
.ipv6_dst
;
264 mark
= netdev
->tnl_cfg
.egress_pkt_mark
;
265 if (ovs_router_lookup(mark
, route
, iface
, NULL
, &gw
)) {
266 struct netdev
*egress_netdev
;
268 if (!netdev_open(iface
, NULL
, &egress_netdev
)) {
269 status
= netdev_get_carrier(egress_netdev
);
270 netdev_close(egress_netdev
);
274 if (strcmp(netdev
->egress_iface
, iface
)
275 || netdev
->carrier_status
!= status
) {
276 ovs_strlcpy_arrays(netdev
->egress_iface
, iface
);
277 netdev
->carrier_status
= status
;
286 tunnel_get_status(const struct netdev
*netdev_
, struct smap
*smap
)
288 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
290 if (netdev
->egress_iface
[0]) {
291 smap_add(smap
, "tunnel_egress_iface", netdev
->egress_iface
);
293 smap_add(smap
, "tunnel_egress_iface_carrier",
294 netdev
->carrier_status
? "up" : "down");
301 netdev_vport_update_flags(struct netdev
*netdev OVS_UNUSED
,
302 enum netdev_flags off
,
303 enum netdev_flags on OVS_UNUSED
,
304 enum netdev_flags
*old_flagsp
)
306 if (off
& (NETDEV_UP
| NETDEV_PROMISC
)) {
310 *old_flagsp
= NETDEV_UP
| NETDEV_PROMISC
;
315 netdev_vport_run(const struct netdev_class
*netdev_class OVS_UNUSED
)
320 seq
= route_table_get_change_seq();
321 if (rt_change_seqno
!= seq
) {
322 rt_change_seqno
= seq
;
323 netdev_vport_route_changed();
328 netdev_vport_wait(const struct netdev_class
*netdev_class OVS_UNUSED
)
333 seq
= route_table_get_change_seq();
334 if (rt_change_seqno
!= seq
) {
335 poll_immediate_wake();
339 /* Code specific to tunnel types. */
342 parse_key(const struct smap
*args
, const char *name
,
343 bool *present
, bool *flow
)
350 s
= smap_get(args
, name
);
352 s
= smap_get(args
, "key");
360 if (!strcmp(s
, "flow")) {
364 return htonll(strtoull(s
, NULL
, 0));
369 parse_tunnel_ip(const char *value
, bool accept_mcast
, bool *flow
,
370 struct in6_addr
*ipv6
, uint16_t *protocol
)
372 if (!strcmp(value
, "flow")) {
377 if (addr_is_ipv6(value
)) {
378 if (lookup_ipv6(value
, ipv6
)) {
381 if (!accept_mcast
&& ipv6_addr_is_multicast(ipv6
)) {
384 *protocol
= ETH_TYPE_IPV6
;
387 if (lookup_ip(value
, &ip
)) {
390 if (!accept_mcast
&& ip_is_multicast(ip
.s_addr
)) {
393 in6_addr_set_mapped_ipv4(ipv6
, ip
.s_addr
);
394 *protocol
= ETH_TYPE_IP
;
/* Bitmap of payload kinds a tunnel type can carry. */
enum tunnel_layers {
    TNL_L2 = 1 << 0,       /* 1 if a tunnel type can carry Ethernet traffic. */
    TNL_L3 = 1 << 1        /* 1 if a tunnel type can carry L3 traffic. */
};
403 static enum tunnel_layers
404 tunnel_supported_layers(const char *type
,
405 const struct netdev_tunnel_config
*tnl_cfg
)
407 if (!strcmp(type
, "lisp")) {
409 } else if (!strcmp(type
, "gre")) {
410 return TNL_L2
| TNL_L3
;
411 } else if (!strcmp(type
, "vxlan")
412 && tnl_cfg
->exts
& (1 << OVS_VXLAN_EXT_GPE
)) {
413 return TNL_L2
| TNL_L3
;
418 static enum netdev_pt_mode
419 default_pt_mode(enum tunnel_layers layers
)
421 return layers
== TNL_L3
? NETDEV_PT_LEGACY_L3
: NETDEV_PT_LEGACY_L2
;
425 set_tunnel_config(struct netdev
*dev_
, const struct smap
*args
, char **errp
)
427 struct netdev_vport
*dev
= netdev_vport_cast(dev_
);
428 const char *name
= netdev_get_name(dev_
);
429 const char *type
= netdev_get_type(dev_
);
430 struct ds errors
= DS_EMPTY_INITIALIZER
;
431 bool needs_dst_port
, has_csum
, has_seq
;
432 uint16_t dst_proto
= 0, src_proto
= 0;
433 struct netdev_tunnel_config tnl_cfg
;
434 struct smap_node
*node
;
437 has_csum
= strstr(type
, "gre") || strstr(type
, "geneve") ||
438 strstr(type
, "stt") || strstr(type
, "vxlan");
439 has_seq
= strstr(type
, "gre");
440 memset(&tnl_cfg
, 0, sizeof tnl_cfg
);
442 /* Add a default destination port for tunnel ports if none specified. */
443 if (!strcmp(type
, "geneve")) {
444 tnl_cfg
.dst_port
= htons(GENEVE_DST_PORT
);
447 if (!strcmp(type
, "vxlan")) {
448 tnl_cfg
.dst_port
= htons(VXLAN_DST_PORT
);
451 if (!strcmp(type
, "lisp")) {
452 tnl_cfg
.dst_port
= htons(LISP_DST_PORT
);
455 if (!strcmp(type
, "stt")) {
456 tnl_cfg
.dst_port
= htons(STT_DST_PORT
);
459 needs_dst_port
= netdev_vport_needs_dst_port(dev_
);
460 tnl_cfg
.dont_fragment
= true;
462 SMAP_FOR_EACH (node
, args
) {
463 if (!strcmp(node
->key
, "remote_ip")) {
464 err
= parse_tunnel_ip(node
->value
, false, &tnl_cfg
.ip_dst_flow
,
465 &tnl_cfg
.ipv6_dst
, &dst_proto
);
468 ds_put_format(&errors
, "%s: bad %s 'remote_ip'\n", name
, type
);
471 ds_put_format(&errors
,
472 "%s: multicast remote_ip=%s not allowed\n",
476 } else if (!strcmp(node
->key
, "local_ip")) {
477 err
= parse_tunnel_ip(node
->value
, true, &tnl_cfg
.ip_src_flow
,
478 &tnl_cfg
.ipv6_src
, &src_proto
);
481 ds_put_format(&errors
, "%s: bad %s 'local_ip'\n", name
, type
);
484 } else if (!strcmp(node
->key
, "tos")) {
485 if (!strcmp(node
->value
, "inherit")) {
486 tnl_cfg
.tos_inherit
= true;
490 tos
= strtol(node
->value
, &endptr
, 0);
491 if (*endptr
== '\0' && tos
== (tos
& IP_DSCP_MASK
)) {
494 ds_put_format(&errors
, "%s: invalid TOS %s\n", name
,
498 } else if (!strcmp(node
->key
, "ttl")) {
499 if (!strcmp(node
->value
, "inherit")) {
500 tnl_cfg
.ttl_inherit
= true;
502 tnl_cfg
.ttl
= atoi(node
->value
);
504 } else if (!strcmp(node
->key
, "dst_port") && needs_dst_port
) {
505 tnl_cfg
.dst_port
= htons(atoi(node
->value
));
506 } else if (!strcmp(node
->key
, "csum") && has_csum
) {
507 if (!strcmp(node
->value
, "true")) {
510 } else if (!strcmp(node
->key
, "seq") && has_seq
) {
511 if (!strcmp(node
->value
, "true")) {
512 tnl_cfg
.set_seq
= true;
514 } else if (!strcmp(node
->key
, "df_default")) {
515 if (!strcmp(node
->value
, "false")) {
516 tnl_cfg
.dont_fragment
= false;
518 } else if (!strcmp(node
->key
, "key") ||
519 !strcmp(node
->key
, "in_key") ||
520 !strcmp(node
->key
, "out_key") ||
521 !strcmp(node
->key
, "packet_type")) {
522 /* Handled separately below. */
523 } else if (!strcmp(node
->key
, "exts") && !strcmp(type
, "vxlan")) {
524 char *str
= xstrdup(node
->value
);
525 char *ext
, *save_ptr
= NULL
;
529 ext
= strtok_r(str
, ",", &save_ptr
);
531 if (!strcmp(type
, "vxlan") && !strcmp(ext
, "gbp")) {
532 tnl_cfg
.exts
|= (1 << OVS_VXLAN_EXT_GBP
);
533 } else if (!strcmp(type
, "vxlan") && !strcmp(ext
, "gpe")) {
534 tnl_cfg
.exts
|= (1 << OVS_VXLAN_EXT_GPE
);
536 ds_put_format(&errors
, "%s: unknown extension '%s'\n",
540 ext
= strtok_r(NULL
, ",", &save_ptr
);
544 } else if (!strcmp(node
->key
, "egress_pkt_mark")) {
545 tnl_cfg
.egress_pkt_mark
= strtoul(node
->value
, NULL
, 10);
546 tnl_cfg
.set_egress_pkt_mark
= true;
547 } else if (!strcmp(node
->key
, "erspan_idx")) {
548 if (!strcmp(node
->value
, "flow")) {
549 tnl_cfg
.erspan_idx_flow
= true;
551 tnl_cfg
.erspan_idx_flow
= false;
552 tnl_cfg
.erspan_idx
= strtol(node
->value
, NULL
, 16);
554 if (tnl_cfg
.erspan_idx
& ~ERSPAN_IDX_MASK
) {
555 ds_put_format(&errors
, "%s: invalid erspan index: %s\n",
561 } else if (!strcmp(node
->key
, "erspan_ver")) {
562 if (!strcmp(node
->value
, "flow")) {
563 tnl_cfg
.erspan_ver_flow
= true;
564 tnl_cfg
.erspan_idx_flow
= true;
565 tnl_cfg
.erspan_dir_flow
= true;
566 tnl_cfg
.erspan_hwid_flow
= true;
568 tnl_cfg
.erspan_ver_flow
= false;
569 tnl_cfg
.erspan_ver
= atoi(node
->value
);
571 if (tnl_cfg
.erspan_ver
!= 1 && tnl_cfg
.erspan_ver
!= 2) {
572 ds_put_format(&errors
, "%s: invalid erspan version: %s\n",
578 } else if (!strcmp(node
->key
, "erspan_dir")) {
579 if (!strcmp(node
->value
, "flow")) {
580 tnl_cfg
.erspan_dir_flow
= true;
582 tnl_cfg
.erspan_dir_flow
= false;
583 tnl_cfg
.erspan_dir
= atoi(node
->value
);
585 if (tnl_cfg
.erspan_dir
!= 0 && tnl_cfg
.erspan_dir
!= 1) {
586 ds_put_format(&errors
, "%s: invalid erspan direction: %s\n",
592 } else if (!strcmp(node
->key
, "erspan_hwid")) {
593 if (!strcmp(node
->value
, "flow")) {
594 tnl_cfg
.erspan_hwid_flow
= true;
596 tnl_cfg
.erspan_hwid_flow
= false;
597 tnl_cfg
.erspan_hwid
= strtol(node
->value
, NULL
, 16);
599 if (tnl_cfg
.erspan_hwid
& ~(ERSPAN_HWID_MASK
>> 4)) {
600 ds_put_format(&errors
, "%s: invalid erspan hardware ID: %s\n",
607 ds_put_format(&errors
, "%s: unknown %s argument '%s'\n", name
,
612 enum tunnel_layers layers
= tunnel_supported_layers(type
, &tnl_cfg
);
613 const char *full_type
= (strcmp(type
, "vxlan") ? type
614 : (tnl_cfg
.exts
& (1 << OVS_VXLAN_EXT_GPE
)
615 ? "VXLAN-GPE" : "VXLAN (without GPE"));
616 const char *packet_type
= smap_get(args
, "packet_type");
618 tnl_cfg
.pt_mode
= default_pt_mode(layers
);
619 } else if (!strcmp(packet_type
, "legacy_l2")) {
620 tnl_cfg
.pt_mode
= NETDEV_PT_LEGACY_L2
;
621 if (!(layers
& TNL_L2
)) {
622 ds_put_format(&errors
, "%s: legacy_l2 configured on %s tunnel "
623 "that cannot carry L2 traffic\n",
628 } else if (!strcmp(packet_type
, "legacy_l3")) {
629 tnl_cfg
.pt_mode
= NETDEV_PT_LEGACY_L3
;
630 if (!(layers
& TNL_L3
)) {
631 ds_put_format(&errors
, "%s: legacy_l3 configured on %s tunnel "
632 "that cannot carry L3 traffic\n",
637 } else if (!strcmp(packet_type
, "ptap")) {
638 tnl_cfg
.pt_mode
= NETDEV_PT_AWARE
;
640 ds_put_format(&errors
, "%s: unknown packet_type '%s'\n",
646 if (!ipv6_addr_is_set(&tnl_cfg
.ipv6_dst
) && !tnl_cfg
.ip_dst_flow
) {
647 ds_put_format(&errors
,
648 "%s: %s type requires valid 'remote_ip' argument\n",
653 if (tnl_cfg
.ip_src_flow
&& !tnl_cfg
.ip_dst_flow
) {
654 ds_put_format(&errors
,
655 "%s: %s type requires 'remote_ip=flow' "
656 "with 'local_ip=flow'\n",
661 if (src_proto
&& dst_proto
&& src_proto
!= dst_proto
) {
662 ds_put_format(&errors
,
663 "%s: 'remote_ip' and 'local_ip' "
664 "has to be of the same address family\n",
670 tnl_cfg
.ttl
= DEFAULT_TTL
;
673 tnl_cfg
.in_key
= parse_key(args
, "in_key",
674 &tnl_cfg
.in_key_present
,
675 &tnl_cfg
.in_key_flow
);
677 tnl_cfg
.out_key
= parse_key(args
, "out_key",
678 &tnl_cfg
.out_key_present
,
679 &tnl_cfg
.out_key_flow
);
681 ovs_mutex_lock(&dev
->mutex
);
682 if (memcmp(&dev
->tnl_cfg
, &tnl_cfg
, sizeof tnl_cfg
)) {
683 dev
->tnl_cfg
= tnl_cfg
;
684 tunnel_check_status_change__(dev
);
685 netdev_change_seq_changed(dev_
);
687 ovs_mutex_unlock(&dev
->mutex
);
693 ds_chomp(&errors
, '\n');
694 VLOG_WARN("%s", ds_cstr(&errors
));
696 *errp
= ds_steal_cstr(&errors
);
706 get_tunnel_config(const struct netdev
*dev
, struct smap
*args
)
708 struct netdev_vport
*netdev
= netdev_vport_cast(dev
);
709 const char *type
= netdev_get_type(dev
);
710 struct netdev_tunnel_config tnl_cfg
;
712 ovs_mutex_lock(&netdev
->mutex
);
713 tnl_cfg
= netdev
->tnl_cfg
;
714 ovs_mutex_unlock(&netdev
->mutex
);
716 if (ipv6_addr_is_set(&tnl_cfg
.ipv6_dst
)) {
717 smap_add_ipv6(args
, "remote_ip", &tnl_cfg
.ipv6_dst
);
718 } else if (tnl_cfg
.ip_dst_flow
) {
719 smap_add(args
, "remote_ip", "flow");
722 if (ipv6_addr_is_set(&tnl_cfg
.ipv6_src
)) {
723 smap_add_ipv6(args
, "local_ip", &tnl_cfg
.ipv6_src
);
724 } else if (tnl_cfg
.ip_src_flow
) {
725 smap_add(args
, "local_ip", "flow");
728 if (tnl_cfg
.in_key_flow
&& tnl_cfg
.out_key_flow
) {
729 smap_add(args
, "key", "flow");
730 } else if (tnl_cfg
.in_key_present
&& tnl_cfg
.out_key_present
731 && tnl_cfg
.in_key
== tnl_cfg
.out_key
) {
732 smap_add_format(args
, "key", "%"PRIu64
, ntohll(tnl_cfg
.in_key
));
734 if (tnl_cfg
.in_key_flow
) {
735 smap_add(args
, "in_key", "flow");
736 } else if (tnl_cfg
.in_key_present
) {
737 smap_add_format(args
, "in_key", "%"PRIu64
,
738 ntohll(tnl_cfg
.in_key
));
741 if (tnl_cfg
.out_key_flow
) {
742 smap_add(args
, "out_key", "flow");
743 } else if (tnl_cfg
.out_key_present
) {
744 smap_add_format(args
, "out_key", "%"PRIu64
,
745 ntohll(tnl_cfg
.out_key
));
749 if (tnl_cfg
.ttl_inherit
) {
750 smap_add(args
, "ttl", "inherit");
751 } else if (tnl_cfg
.ttl
!= DEFAULT_TTL
) {
752 smap_add_format(args
, "ttl", "%"PRIu8
, tnl_cfg
.ttl
);
755 if (tnl_cfg
.tos_inherit
) {
756 smap_add(args
, "tos", "inherit");
757 } else if (tnl_cfg
.tos
) {
758 smap_add_format(args
, "tos", "0x%x", tnl_cfg
.tos
);
761 if (tnl_cfg
.dst_port
) {
762 uint16_t dst_port
= ntohs(tnl_cfg
.dst_port
);
764 if ((!strcmp("geneve", type
) && dst_port
!= GENEVE_DST_PORT
) ||
765 (!strcmp("vxlan", type
) && dst_port
!= VXLAN_DST_PORT
) ||
766 (!strcmp("lisp", type
) && dst_port
!= LISP_DST_PORT
) ||
767 (!strcmp("stt", type
) && dst_port
!= STT_DST_PORT
)) {
768 smap_add_format(args
, "dst_port", "%d", dst_port
);
773 smap_add(args
, "csum", "true");
776 if (tnl_cfg
.set_seq
) {
777 smap_add(args
, "seq", "true");
780 enum tunnel_layers layers
= tunnel_supported_layers(type
, &tnl_cfg
);
781 if (tnl_cfg
.pt_mode
!= default_pt_mode(layers
)) {
782 smap_add(args
, "packet_type",
783 tnl_cfg
.pt_mode
== NETDEV_PT_LEGACY_L2
? "legacy_l2"
784 : tnl_cfg
.pt_mode
== NETDEV_PT_LEGACY_L3
? "legacy_l3"
788 if (!tnl_cfg
.dont_fragment
) {
789 smap_add(args
, "df_default", "false");
792 if (tnl_cfg
.set_egress_pkt_mark
) {
793 smap_add_format(args
, "egress_pkt_mark",
794 "%"PRIu32
, tnl_cfg
.egress_pkt_mark
);
797 if (!strcmp("erspan", type
) || !strcmp("ip6erspan", type
)) {
798 if (tnl_cfg
.erspan_ver_flow
) {
799 /* since version number is not determined,
800 * assume print all other as flow
802 smap_add(args
, "erspan_ver", "flow");
803 smap_add(args
, "erspan_idx", "flow");
804 smap_add(args
, "erspan_dir", "flow");
805 smap_add(args
, "erspan_hwid", "flow");
807 smap_add_format(args
, "erspan_ver", "%d", tnl_cfg
.erspan_ver
);
809 if (tnl_cfg
.erspan_ver
== 1) {
810 if (tnl_cfg
.erspan_idx_flow
) {
811 smap_add(args
, "erspan_idx", "flow");
813 smap_add_format(args
, "erspan_idx", "0x%x",
816 } else if (tnl_cfg
.erspan_ver
== 2) {
817 if (tnl_cfg
.erspan_dir_flow
) {
818 smap_add(args
, "erspan_dir", "flow");
820 smap_add_format(args
, "erspan_dir", "%d",
823 if (tnl_cfg
.erspan_hwid_flow
) {
824 smap_add(args
, "erspan_hwid", "flow");
826 smap_add_format(args
, "erspan_hwid", "0x%x",
827 tnl_cfg
.erspan_hwid
);
836 /* Code specific to patch ports. */
838 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
839 * string that the caller must free.
841 * If 'netdev' is not a patch port, returns NULL. */
843 netdev_vport_patch_peer(const struct netdev
*netdev_
)
847 if (netdev_vport_is_patch(netdev_
)) {
848 struct netdev_vport
*netdev
= netdev_vport_cast(netdev_
);
850 ovs_mutex_lock(&netdev
->mutex
);
852 peer
= xstrdup(netdev
->peer
);
854 ovs_mutex_unlock(&netdev
->mutex
);
861 netdev_vport_inc_rx(const struct netdev
*netdev
,
862 const struct dpif_flow_stats
*stats
)
864 if (is_vport_class(netdev_get_class(netdev
))) {
865 struct netdev_vport
*dev
= netdev_vport_cast(netdev
);
867 ovs_mutex_lock(&dev
->mutex
);
868 dev
->stats
.rx_packets
+= stats
->n_packets
;
869 dev
->stats
.rx_bytes
+= stats
->n_bytes
;
870 ovs_mutex_unlock(&dev
->mutex
);
875 netdev_vport_inc_tx(const struct netdev
*netdev
,
876 const struct dpif_flow_stats
*stats
)
878 if (is_vport_class(netdev_get_class(netdev
))) {
879 struct netdev_vport
*dev
= netdev_vport_cast(netdev
);
881 ovs_mutex_lock(&dev
->mutex
);
882 dev
->stats
.tx_packets
+= stats
->n_packets
;
883 dev
->stats
.tx_bytes
+= stats
->n_bytes
;
884 ovs_mutex_unlock(&dev
->mutex
);
889 get_patch_config(const struct netdev
*dev_
, struct smap
*args
)
891 struct netdev_vport
*dev
= netdev_vport_cast(dev_
);
893 ovs_mutex_lock(&dev
->mutex
);
895 smap_add(args
, "peer", dev
->peer
);
897 ovs_mutex_unlock(&dev
->mutex
);
903 set_patch_config(struct netdev
*dev_
, const struct smap
*args
, char **errp
)
905 struct netdev_vport
*dev
= netdev_vport_cast(dev_
);
906 const char *name
= netdev_get_name(dev_
);
909 peer
= smap_get(args
, "peer");
911 VLOG_ERR_BUF(errp
, "%s: patch type requires valid 'peer' argument",
916 if (smap_count(args
) > 1) {
917 VLOG_ERR_BUF(errp
, "%s: patch type takes only a 'peer' argument",
922 if (!strcmp(name
, peer
)) {
923 VLOG_ERR_BUF(errp
, "%s: patch peer must not be self", name
);
927 ovs_mutex_lock(&dev
->mutex
);
928 if (!dev
->peer
|| strcmp(dev
->peer
, peer
)) {
930 dev
->peer
= xstrdup(peer
);
931 netdev_change_seq_changed(dev_
);
933 ovs_mutex_unlock(&dev
->mutex
);
939 get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
941 struct netdev_vport
*dev
= netdev_vport_cast(netdev
);
943 ovs_mutex_lock(&dev
->mutex
);
944 /* Passing only collected counters */
945 stats
->tx_packets
= dev
->stats
.tx_packets
;
946 stats
->tx_bytes
= dev
->stats
.tx_bytes
;
947 stats
->rx_packets
= dev
->stats
.rx_packets
;
948 stats
->rx_bytes
= dev
->stats
.rx_bytes
;
949 ovs_mutex_unlock(&dev
->mutex
);
954 static enum netdev_pt_mode
955 get_pt_mode(const struct netdev
*netdev
)
957 struct netdev_vport
*dev
= netdev_vport_cast(netdev
);
959 return dev
->tnl_cfg
.pt_mode
;
966 netdev_vport_get_ifindex(const struct netdev
*netdev_
)
968 char buf
[NETDEV_VPORT_NAME_BUFSIZE
];
969 const char *name
= netdev_vport_get_dpif_port(netdev_
, buf
, sizeof(buf
));
971 return linux_get_ifindex(name
);
974 #define NETDEV_VPORT_GET_IFINDEX netdev_vport_get_ifindex
975 #define NETDEV_FLOW_OFFLOAD_API LINUX_FLOW_OFFLOAD_API
976 #else /* !__linux__ */
977 #define NETDEV_VPORT_GET_IFINDEX NULL
978 #define NETDEV_FLOW_OFFLOAD_API NO_OFFLOAD_API
979 #endif /* __linux__ */
/* NOTE(review): this macro expands to a positional netdev_class initializer;
 * the extraction lost several member lines (send, get_stats, get_status,
 * get_in4, rollover of the rx API, etc. are presumably among the missing
 * entries), so the list below is incomplete and must be restored from the
 * upstream source before this file can compile.  Do not reorder entries:
 * each position must match struct netdev_class exactly. */
981 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
982 GET_TUNNEL_CONFIG, GET_STATUS, \
984 PUSH_HEADER, POP_HEADER, \
990 netdev_vport_alloc, \
991 netdev_vport_construct, \
992 netdev_vport_destruct, \
993 netdev_vport_dealloc, \
1000 NULL, /* get_numa_id */ \
1001 NULL, /* set_tx_multiq */ \
1004 NULL, /* send_wait */ \
1006 netdev_vport_set_etheraddr, \
1007 netdev_vport_get_etheraddr, \
1008 NULL, /* get_mtu */ \
1009 NULL, /* set_mtu */ \
1011 NULL, /* get_carrier */ \
1012 NULL, /* get_carrier_resets */ \
1013 NULL, /* get_miimon */ \
1015 NULL, /* get_custom_stats */ \
1017 NULL, /* get_features */ \
1018 NULL, /* set_advertisements */ \
1021 NULL, /* set_policing */ \
1022 NULL, /* get_qos_types */ \
1023 NULL, /* get_qos_capabilities */ \
1024 NULL, /* get_qos */ \
1025 NULL, /* set_qos */ \
1026 NULL, /* get_queue */ \
1027 NULL, /* set_queue */ \
1028 NULL, /* delete_queue */ \
1029 NULL, /* get_queue_stats */ \
1030 NULL, /* queue_dump_start */ \
1031 NULL, /* queue_dump_next */ \
1032 NULL, /* queue_dump_done */ \
1033 NULL, /* dump_queue_stats */ \
1035 NULL, /* set_in4 */ \
1036 NULL, /* get_addr_list */ \
1037 NULL, /* add_router */ \
1038 NULL, /* get_next_hop */ \
1040 NULL, /* arp_lookup */ \
1042 netdev_vport_update_flags, \
1043 NULL, /* reconfigure */ \
1045 NULL, /* rx_alloc */ \
1046 NULL, /* rx_construct */ \
1047 NULL, /* rx_destruct */ \
1048 NULL, /* rx_dealloc */ \
1049 NULL, /* rx_recv */ \
1050 NULL, /* rx_wait */ \
1051 NULL, /* rx_drain */ \
1053 NETDEV_FLOW_OFFLOAD_API, \
1054 NULL /* get_block_id */
/* Builds a vport_class initializer for a tunnel type: dpif port base name
 * plus the common tunnel callbacks with per-type header build/push/pop and
 * ifindex hooks.  NOTE(review): the GET_IFINDEX parameter and the
 * surrounding braces were missing from the mangled extraction and restored
 * here — confirm against upstream. */
#define TUNNEL_CLASS(NAME, DPIF_PORT, BUILD_HEADER, PUSH_HEADER, POP_HEADER, \
                     GET_IFINDEX)                                            \
    { DPIF_PORT,                                                             \
      { VPORT_FUNCTIONS(get_tunnel_config,                                   \
                        set_tunnel_config,                                   \
                        get_netdev_tunnel_config,                            \
                        tunnel_get_status,                                   \
                        BUILD_HEADER, PUSH_HEADER, POP_HEADER,               \
                        GET_IFINDEX) }}
1069 netdev_vport_tunnel_register(void)
1071 /* The name of the dpif_port should be short enough to accomodate adding
1072 * a port number to the end if one is necessary. */
1073 static const struct vport_class vport_classes
[] = {
1074 TUNNEL_CLASS("geneve", "genev_sys", netdev_geneve_build_header
,
1075 netdev_tnl_push_udp_header
,
1076 netdev_geneve_pop_header
,
1077 NETDEV_VPORT_GET_IFINDEX
),
1078 TUNNEL_CLASS("gre", "gre_sys", netdev_gre_build_header
,
1079 netdev_gre_push_header
,
1080 netdev_gre_pop_header
,
1082 TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header
,
1083 netdev_tnl_push_udp_header
,
1084 netdev_vxlan_pop_header
,
1085 NETDEV_VPORT_GET_IFINDEX
),
1086 TUNNEL_CLASS("lisp", "lisp_sys", NULL
, NULL
, NULL
, NULL
),
1087 TUNNEL_CLASS("stt", "stt_sys", NULL
, NULL
, NULL
, NULL
),
1088 TUNNEL_CLASS("erspan", "erspan_sys", netdev_erspan_build_header
,
1089 netdev_erspan_push_header
,
1090 netdev_erspan_pop_header
,
1092 TUNNEL_CLASS("ip6erspan", "ip6erspan_sys", netdev_erspan_build_header
,
1093 netdev_erspan_push_header
,
1094 netdev_erspan_pop_header
,
1096 TUNNEL_CLASS("ip6gre", "ip6gre_sys", netdev_gre_build_header
,
1097 netdev_gre_push_header
,
1098 netdev_gre_pop_header
,
1101 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
1103 if (ovsthread_once_start(&once
)) {
1106 for (i
= 0; i
< ARRAY_SIZE(vport_classes
); i
++) {
1107 netdev_register_provider(&vport_classes
[i
].netdev_class
);
1110 unixctl_command_register("tnl/egress_port_range", "min max", 0, 2,
1111 netdev_tnl_egress_port_range
, NULL
);
1113 ovsthread_once_done(&once
);
1118 netdev_vport_patch_register(void)
1120 static const struct vport_class patch_class
=
1123 VPORT_FUNCTIONS(get_patch_config
,
1126 NULL
, NULL
, NULL
, NULL
, NULL
) }};
1127 netdev_register_provider(&patch_class
.netdev_class
);