2 * Copyright (c) 2008-2017 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "dpif-netlink.h"
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
31 #include <sys/epoll.h>
36 #include "dpif-netlink-rtnl.h"
37 #include "dpif-provider.h"
38 #include "fat-rwlock.h"
40 #include "netdev-linux.h"
41 #include "netdev-provider.h"
42 #include "netdev-vport.h"
44 #include "netlink-conntrack.h"
45 #include "netlink-notifier.h"
46 #include "netlink-socket.h"
50 #include "openvswitch/dynamic-string.h"
51 #include "openvswitch/flow.h"
52 #include "openvswitch/match.h"
53 #include "openvswitch/ofpbuf.h"
54 #include "openvswitch/poll-loop.h"
55 #include "openvswitch/shash.h"
56 #include "openvswitch/thread.h"
57 #include "openvswitch/vlog.h"
62 #include "unaligned.h"
65 VLOG_DEFINE_THIS_MODULE(dpif_netlink
);
72 enum { MAX_PORTS
= USHRT_MAX
};
74 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
75 * missing if we have old headers. */
76 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
78 #define FLOW_DUMP_MAX_BATCH 50
79 #define OPERATE_MAX_OPS 50
81 #ifndef EPOLLEXCLUSIVE
82 #define EPOLLEXCLUSIVE (1u << 28)
85 struct dpif_netlink_dp
{
86 /* Generic Netlink header. */
89 /* struct ovs_header. */
93 const char *name
; /* OVS_DP_ATTR_NAME. */
94 const uint32_t *upcall_pid
; /* OVS_DP_ATTR_UPCALL_PID. */
95 uint32_t user_features
; /* OVS_DP_ATTR_USER_FEATURES */
96 const struct ovs_dp_stats
*stats
; /* OVS_DP_ATTR_STATS. */
97 const struct ovs_dp_megaflow_stats
*megaflow_stats
;
98 /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
101 static void dpif_netlink_dp_init(struct dpif_netlink_dp
*);
102 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp
*,
103 const struct ofpbuf
*);
104 static void dpif_netlink_dp_dump_start(struct nl_dump
*);
105 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp
*request
,
106 struct dpif_netlink_dp
*reply
,
107 struct ofpbuf
**bufp
);
108 static int dpif_netlink_dp_get(const struct dpif
*,
109 struct dpif_netlink_dp
*reply
,
110 struct ofpbuf
**bufp
);
112 struct dpif_netlink_flow
{
113 /* Generic Netlink header. */
116 /* struct ovs_header. */
117 unsigned int nlmsg_flags
;
122 * The 'stats' member points to 64-bit data that might only be aligned on
123 * 32-bit boundaries, so get_unaligned_u64() should be used to access its
126 * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
127 * the Netlink version of the command, even if actions_len is zero. */
128 const struct nlattr
*key
; /* OVS_FLOW_ATTR_KEY. */
130 const struct nlattr
*mask
; /* OVS_FLOW_ATTR_MASK. */
132 const struct nlattr
*actions
; /* OVS_FLOW_ATTR_ACTIONS. */
134 ovs_u128 ufid
; /* OVS_FLOW_ATTR_FLOW_ID. */
135 bool ufid_present
; /* Is there a UFID? */
136 bool ufid_terse
; /* Skip serializing key/mask/acts? */
137 const struct ovs_flow_stats
*stats
; /* OVS_FLOW_ATTR_STATS. */
138 const uint8_t *tcp_flags
; /* OVS_FLOW_ATTR_TCP_FLAGS. */
139 const ovs_32aligned_u64
*used
; /* OVS_FLOW_ATTR_USED. */
140 bool clear
; /* OVS_FLOW_ATTR_CLEAR. */
141 bool probe
; /* OVS_FLOW_ATTR_PROBE. */
144 static void dpif_netlink_flow_init(struct dpif_netlink_flow
*);
145 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow
*,
146 const struct ofpbuf
*);
147 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow
*,
149 static int dpif_netlink_flow_transact(struct dpif_netlink_flow
*request
,
150 struct dpif_netlink_flow
*reply
,
151 struct ofpbuf
**bufp
);
152 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow
*,
153 struct dpif_flow_stats
*);
154 static void dpif_netlink_flow_to_dpif_flow(struct dpif
*, struct dpif_flow
*,
155 const struct dpif_netlink_flow
*);
157 /* One of the dpif channels between the kernel and userspace. */
158 struct dpif_channel
{
159 struct nl_sock
*sock
; /* Netlink socket. */
160 long long int last_poll
; /* Last time this channel was polled. */
164 #define VPORT_SOCK_POOL_SIZE 1
165 /* On Windows, there is no native support for epoll. There are equivalent
166 * interfaces though, that are not used currently. For simpicity, a pool of
167 * netlink sockets is used. Each socket is represented by 'struct
168 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
169 * sharing the same socket. In the future, we can add a reference count and
171 struct dpif_windows_vport_sock
{
172 struct nl_sock
*nl_sock
; /* netlink socket. */
176 struct dpif_handler
{
177 struct epoll_event
*epoll_events
;
178 int epoll_fd
; /* epoll fd that includes channel socks. */
179 int n_events
; /* Num events returned by epoll_wait(). */
180 int event_offset
; /* Offset into 'epoll_events'. */
183 /* Pool of sockets. */
184 struct dpif_windows_vport_sock
*vport_sock_pool
;
185 size_t last_used_pool_idx
; /* Index to aid in allocating a
186 socket in the pool to a port. */
190 /* Datapath interface for the openvswitch Linux kernel module. */
191 struct dpif_netlink
{
195 /* Upcall messages. */
196 struct fat_rwlock upcall_lock
;
197 struct dpif_handler
*handlers
;
198 uint32_t n_handlers
; /* Num of upcall handlers. */
199 struct dpif_channel
*channels
; /* Array of channels for each port. */
200 int uc_array_size
; /* Size of 'handler->channels' and */
201 /* 'handler->epoll_events'. */
203 /* Change notification. */
204 struct nl_sock
*port_notifier
; /* vport multicast group subscriber. */
205 bool refresh_channels
;
208 static void report_loss(struct dpif_netlink
*, struct dpif_channel
*,
209 uint32_t ch_idx
, uint32_t handler_id
);
211 static struct vlog_rate_limit error_rl
= VLOG_RATE_LIMIT_INIT(9999, 5);
213 /* Generic Netlink family numbers for OVS.
215 * Initialized by dpif_netlink_init(). */
216 static int ovs_datapath_family
;
217 static int ovs_vport_family
;
218 static int ovs_flow_family
;
219 static int ovs_packet_family
;
220 static int ovs_meter_family
;
221 static int ovs_ct_limit_family
;
223 /* Generic Netlink multicast groups for OVS.
225 * Initialized by dpif_netlink_init(). */
226 static unsigned int ovs_vport_mcgroup
;
228 /* If true, tunnel devices are created using OVS compat/genetlink.
229 * If false, tunnel devices are created with rtnetlink and using light weight
230 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
231 * to using the compat interface. */
232 static bool ovs_tunnels_out_of_tree
= true;
234 static int dpif_netlink_init(void);
235 static int open_dpif(const struct dpif_netlink_dp
*, struct dpif
**);
236 static uint32_t dpif_netlink_port_get_pid(const struct dpif
*,
238 static void dpif_netlink_handler_uninit(struct dpif_handler
*handler
);
239 static int dpif_netlink_refresh_channels(struct dpif_netlink
*,
240 uint32_t n_handlers
);
241 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport
*,
243 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport
*,
244 const struct ofpbuf
*);
245 static int dpif_netlink_port_query__(const struct dpif_netlink
*dpif
,
246 odp_port_t port_no
, const char *port_name
,
247 struct dpif_port
*dpif_port
);
249 static struct dpif_netlink
*
250 dpif_netlink_cast(const struct dpif
*dpif
)
252 dpif_assert_class(dpif
, &dpif_netlink_class
);
253 return CONTAINER_OF(dpif
, struct dpif_netlink
, dpif
);
257 dpif_netlink_enumerate(struct sset
*all_dps
,
258 const struct dpif_class
*dpif_class OVS_UNUSED
)
261 uint64_t reply_stub
[NL_DUMP_BUFSIZE
/ 8];
262 struct ofpbuf msg
, buf
;
265 error
= dpif_netlink_init();
270 ofpbuf_use_stub(&buf
, reply_stub
, sizeof reply_stub
);
271 dpif_netlink_dp_dump_start(&dump
);
272 while (nl_dump_next(&dump
, &msg
, &buf
)) {
273 struct dpif_netlink_dp dp
;
275 if (!dpif_netlink_dp_from_ofpbuf(&dp
, &msg
)) {
276 sset_add(all_dps
, dp
.name
);
280 return nl_dump_done(&dump
);
284 dpif_netlink_open(const struct dpif_class
*class OVS_UNUSED
, const char *name
,
285 bool create
, struct dpif
**dpifp
)
287 struct dpif_netlink_dp dp_request
, dp
;
292 error
= dpif_netlink_init();
297 /* Create or look up datapath. */
298 dpif_netlink_dp_init(&dp_request
);
300 dp_request
.cmd
= OVS_DP_CMD_NEW
;
302 dp_request
.upcall_pid
= &upcall_pid
;
304 /* Use OVS_DP_CMD_SET to report user features */
305 dp_request
.cmd
= OVS_DP_CMD_SET
;
307 dp_request
.name
= name
;
308 dp_request
.user_features
|= OVS_DP_F_UNALIGNED
;
309 dp_request
.user_features
|= OVS_DP_F_VPORT_PIDS
;
310 error
= dpif_netlink_dp_transact(&dp_request
, &dp
, &buf
);
315 error
= open_dpif(&dp
, dpifp
);
321 open_dpif(const struct dpif_netlink_dp
*dp
, struct dpif
**dpifp
)
323 struct dpif_netlink
*dpif
;
325 dpif
= xzalloc(sizeof *dpif
);
326 dpif
->port_notifier
= NULL
;
327 fat_rwlock_init(&dpif
->upcall_lock
);
329 dpif_init(&dpif
->dpif
, &dpif_netlink_class
, dp
->name
,
330 dp
->dp_ifindex
, dp
->dp_ifindex
);
332 dpif
->dp_ifindex
= dp
->dp_ifindex
;
333 *dpifp
= &dpif
->dpif
;
340 vport_delete_sock_pool(struct dpif_handler
*handler
)
341 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
343 if (handler
->vport_sock_pool
) {
345 struct dpif_windows_vport_sock
*sock_pool
=
346 handler
->vport_sock_pool
;
348 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
349 if (sock_pool
[i
].nl_sock
) {
350 nl_sock_unsubscribe_packets(sock_pool
[i
].nl_sock
);
351 nl_sock_destroy(sock_pool
[i
].nl_sock
);
352 sock_pool
[i
].nl_sock
= NULL
;
356 free(handler
->vport_sock_pool
);
357 handler
->vport_sock_pool
= NULL
;
362 vport_create_sock_pool(struct dpif_handler
*handler
)
363 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
365 struct dpif_windows_vport_sock
*sock_pool
;
369 sock_pool
= xzalloc(VPORT_SOCK_POOL_SIZE
* sizeof *sock_pool
);
370 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
371 error
= nl_sock_create(NETLINK_GENERIC
, &sock_pool
[i
].nl_sock
);
376 /* Enable the netlink socket to receive packets. This is equivalent to
377 * calling nl_sock_join_mcgroup() to receive events. */
378 error
= nl_sock_subscribe_packets(sock_pool
[i
].nl_sock
);
384 handler
->vport_sock_pool
= sock_pool
;
385 handler
->last_used_pool_idx
= 0;
389 vport_delete_sock_pool(handler
);
394 /* Given the port number 'port_idx', extracts the pid of netlink socket
395 * associated to the port and assigns it to 'upcall_pid'. */
397 vport_get_pid(struct dpif_netlink
*dpif
, uint32_t port_idx
,
398 uint32_t *upcall_pid
)
400 /* Since the nl_sock can only be assigned in either all
401 * or none "dpif" channels, the following check
403 if (!dpif
->channels
[port_idx
].sock
) {
406 ovs_assert(!WINDOWS
|| dpif
->n_handlers
<= 1);
408 *upcall_pid
= nl_sock_pid(dpif
->channels
[port_idx
].sock
);
414 vport_add_channel(struct dpif_netlink
*dpif
, odp_port_t port_no
,
415 struct nl_sock
*socksp
)
417 struct epoll_event event
;
418 uint32_t port_idx
= odp_to_u32(port_no
);
422 if (dpif
->handlers
== NULL
) {
426 /* We assume that the datapath densely chooses port numbers, which can
427 * therefore be used as an index into 'channels' and 'epoll_events' of
429 if (port_idx
>= dpif
->uc_array_size
) {
430 uint32_t new_size
= port_idx
+ 1;
432 if (new_size
> MAX_PORTS
) {
433 VLOG_WARN_RL(&error_rl
, "%s: datapath port %"PRIu32
" too big",
434 dpif_name(&dpif
->dpif
), port_no
);
438 dpif
->channels
= xrealloc(dpif
->channels
,
439 new_size
* sizeof *dpif
->channels
);
441 for (i
= dpif
->uc_array_size
; i
< new_size
; i
++) {
442 dpif
->channels
[i
].sock
= NULL
;
445 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
446 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
448 handler
->epoll_events
= xrealloc(handler
->epoll_events
,
449 new_size
* sizeof *handler
->epoll_events
);
452 dpif
->uc_array_size
= new_size
;
455 memset(&event
, 0, sizeof event
);
456 event
.events
= EPOLLIN
| EPOLLEXCLUSIVE
;
457 event
.data
.u32
= port_idx
;
459 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
460 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
463 if (epoll_ctl(handler
->epoll_fd
, EPOLL_CTL_ADD
, nl_sock_fd(socksp
),
470 dpif
->channels
[port_idx
].sock
= socksp
;
471 dpif
->channels
[port_idx
].last_poll
= LLONG_MIN
;
478 epoll_ctl(dpif
->handlers
[i
].epoll_fd
, EPOLL_CTL_DEL
,
479 nl_sock_fd(socksp
), NULL
);
482 dpif
->channels
[port_idx
].sock
= NULL
;
488 vport_del_channels(struct dpif_netlink
*dpif
, odp_port_t port_no
)
490 uint32_t port_idx
= odp_to_u32(port_no
);
493 if (!dpif
->handlers
|| port_idx
>= dpif
->uc_array_size
494 || !dpif
->channels
[port_idx
].sock
) {
498 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
499 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
501 epoll_ctl(handler
->epoll_fd
, EPOLL_CTL_DEL
,
502 nl_sock_fd(dpif
->channels
[port_idx
].sock
), NULL
);
504 handler
->event_offset
= handler
->n_events
= 0;
507 nl_sock_destroy(dpif
->channels
[port_idx
].sock
);
509 dpif
->channels
[port_idx
].sock
= NULL
;
513 destroy_all_channels(struct dpif_netlink
*dpif
)
514 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
518 if (!dpif
->handlers
) {
522 for (i
= 0; i
< dpif
->uc_array_size
; i
++ ) {
523 struct dpif_netlink_vport vport_request
;
524 uint32_t upcall_pids
= 0;
526 if (!dpif
->channels
[i
].sock
) {
530 /* Turn off upcalls. */
531 dpif_netlink_vport_init(&vport_request
);
532 vport_request
.cmd
= OVS_VPORT_CMD_SET
;
533 vport_request
.dp_ifindex
= dpif
->dp_ifindex
;
534 vport_request
.port_no
= u32_to_odp(i
);
535 vport_request
.n_upcall_pids
= 1;
536 vport_request
.upcall_pids
= &upcall_pids
;
537 dpif_netlink_vport_transact(&vport_request
, NULL
, NULL
);
539 vport_del_channels(dpif
, u32_to_odp(i
));
542 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
543 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
545 dpif_netlink_handler_uninit(handler
);
546 free(handler
->epoll_events
);
548 free(dpif
->channels
);
549 free(dpif
->handlers
);
550 dpif
->handlers
= NULL
;
551 dpif
->channels
= NULL
;
552 dpif
->n_handlers
= 0;
553 dpif
->uc_array_size
= 0;
557 dpif_netlink_close(struct dpif
*dpif_
)
559 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
561 nl_sock_destroy(dpif
->port_notifier
);
563 fat_rwlock_wrlock(&dpif
->upcall_lock
);
564 destroy_all_channels(dpif
);
565 fat_rwlock_unlock(&dpif
->upcall_lock
);
567 fat_rwlock_destroy(&dpif
->upcall_lock
);
572 dpif_netlink_destroy(struct dpif
*dpif_
)
574 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
575 struct dpif_netlink_dp dp
;
577 dpif_netlink_dp_init(&dp
);
578 dp
.cmd
= OVS_DP_CMD_DEL
;
579 dp
.dp_ifindex
= dpif
->dp_ifindex
;
580 return dpif_netlink_dp_transact(&dp
, NULL
, NULL
);
584 dpif_netlink_run(struct dpif
*dpif_
)
586 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
588 if (dpif
->refresh_channels
) {
589 dpif
->refresh_channels
= false;
590 fat_rwlock_wrlock(&dpif
->upcall_lock
);
591 dpif_netlink_refresh_channels(dpif
, dpif
->n_handlers
);
592 fat_rwlock_unlock(&dpif
->upcall_lock
);
598 dpif_netlink_get_stats(const struct dpif
*dpif_
, struct dpif_dp_stats
*stats
)
600 struct dpif_netlink_dp dp
;
604 error
= dpif_netlink_dp_get(dpif_
, &dp
, &buf
);
606 memset(stats
, 0, sizeof *stats
);
609 stats
->n_hit
= get_32aligned_u64(&dp
.stats
->n_hit
);
610 stats
->n_missed
= get_32aligned_u64(&dp
.stats
->n_missed
);
611 stats
->n_lost
= get_32aligned_u64(&dp
.stats
->n_lost
);
612 stats
->n_flows
= get_32aligned_u64(&dp
.stats
->n_flows
);
615 if (dp
.megaflow_stats
) {
616 stats
->n_masks
= dp
.megaflow_stats
->n_masks
;
617 stats
->n_mask_hit
= get_32aligned_u64(
618 &dp
.megaflow_stats
->n_mask_hit
);
620 stats
->n_masks
= UINT32_MAX
;
621 stats
->n_mask_hit
= UINT64_MAX
;
629 get_vport_type(const struct dpif_netlink_vport
*vport
)
631 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
633 switch (vport
->type
) {
634 case OVS_VPORT_TYPE_NETDEV
: {
635 const char *type
= netdev_get_type_from_name(vport
->name
);
637 return type
? type
: "system";
640 case OVS_VPORT_TYPE_INTERNAL
:
643 case OVS_VPORT_TYPE_GENEVE
:
646 case OVS_VPORT_TYPE_GRE
:
649 case OVS_VPORT_TYPE_VXLAN
:
652 case OVS_VPORT_TYPE_LISP
:
655 case OVS_VPORT_TYPE_STT
:
658 case OVS_VPORT_TYPE_ERSPAN
:
661 case OVS_VPORT_TYPE_IP6ERSPAN
:
664 case OVS_VPORT_TYPE_IP6GRE
:
667 case OVS_VPORT_TYPE_UNSPEC
:
668 case __OVS_VPORT_TYPE_MAX
:
672 VLOG_WARN_RL(&rl
, "dp%d: port `%s' has unsupported type %u",
673 vport
->dp_ifindex
, vport
->name
, (unsigned int) vport
->type
);
678 netdev_to_ovs_vport_type(const char *type
)
680 if (!strcmp(type
, "tap") || !strcmp(type
, "system")) {
681 return OVS_VPORT_TYPE_NETDEV
;
682 } else if (!strcmp(type
, "internal")) {
683 return OVS_VPORT_TYPE_INTERNAL
;
684 } else if (strstr(type
, "stt")) {
685 return OVS_VPORT_TYPE_STT
;
686 } else if (!strcmp(type
, "geneve")) {
687 return OVS_VPORT_TYPE_GENEVE
;
688 } else if (!strcmp(type
, "vxlan")) {
689 return OVS_VPORT_TYPE_VXLAN
;
690 } else if (!strcmp(type
, "lisp")) {
691 return OVS_VPORT_TYPE_LISP
;
692 } else if (!strcmp(type
, "erspan")) {
693 return OVS_VPORT_TYPE_ERSPAN
;
694 } else if (!strcmp(type
, "ip6erspan")) {
695 return OVS_VPORT_TYPE_IP6ERSPAN
;
696 } else if (!strcmp(type
, "ip6gre")) {
697 return OVS_VPORT_TYPE_IP6GRE
;
698 } else if (!strcmp(type
, "gre")) {
699 return OVS_VPORT_TYPE_GRE
;
701 return OVS_VPORT_TYPE_UNSPEC
;
706 dpif_netlink_port_add__(struct dpif_netlink
*dpif
, const char *name
,
707 enum ovs_vport_type type
,
708 struct ofpbuf
*options
,
709 odp_port_t
*port_nop
)
710 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
712 struct dpif_netlink_vport request
, reply
;
714 struct nl_sock
*socksp
= NULL
;
715 uint32_t upcall_pids
= 0;
718 if (dpif
->handlers
) {
719 if (nl_sock_create(NETLINK_GENERIC
, &socksp
)) {
724 dpif_netlink_vport_init(&request
);
725 request
.cmd
= OVS_VPORT_CMD_NEW
;
726 request
.dp_ifindex
= dpif
->dp_ifindex
;
730 request
.port_no
= *port_nop
;
732 upcall_pids
= nl_sock_pid(socksp
);
734 request
.n_upcall_pids
= 1;
735 request
.upcall_pids
= &upcall_pids
;
738 request
.options
= options
->data
;
739 request
.options_len
= options
->size
;
742 error
= dpif_netlink_vport_transact(&request
, &reply
, &buf
);
744 *port_nop
= reply
.port_no
;
746 if (error
== EBUSY
&& *port_nop
!= ODPP_NONE
) {
747 VLOG_INFO("%s: requested port %"PRIu32
" is in use",
748 dpif_name(&dpif
->dpif
), *port_nop
);
751 nl_sock_destroy(socksp
);
755 error
= vport_add_channel(dpif
, *port_nop
, socksp
);
757 VLOG_INFO("%s: could not add channel for port %s",
758 dpif_name(&dpif
->dpif
), name
);
760 /* Delete the port. */
761 dpif_netlink_vport_init(&request
);
762 request
.cmd
= OVS_VPORT_CMD_DEL
;
763 request
.dp_ifindex
= dpif
->dp_ifindex
;
764 request
.port_no
= *port_nop
;
765 dpif_netlink_vport_transact(&request
, NULL
, NULL
);
766 nl_sock_destroy(socksp
);
777 dpif_netlink_port_add_compat(struct dpif_netlink
*dpif
, struct netdev
*netdev
,
778 odp_port_t
*port_nop
)
779 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
781 const struct netdev_tunnel_config
*tnl_cfg
;
782 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
783 const char *type
= netdev_get_type(netdev
);
784 uint64_t options_stub
[64 / 8];
785 enum ovs_vport_type ovs_type
;
786 struct ofpbuf options
;
789 name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
791 ovs_type
= netdev_to_ovs_vport_type(netdev_get_type(netdev
));
792 if (ovs_type
== OVS_VPORT_TYPE_UNSPEC
) {
793 VLOG_WARN_RL(&error_rl
, "%s: cannot create port `%s' because it has "
794 "unsupported type `%s'",
795 dpif_name(&dpif
->dpif
), name
, type
);
799 if (ovs_type
== OVS_VPORT_TYPE_NETDEV
) {
801 /* XXX : Map appropiate Windows handle */
803 netdev_linux_ethtool_set_flag(netdev
, ETH_FLAG_LRO
, "LRO", false);
808 if (ovs_type
== OVS_VPORT_TYPE_INTERNAL
) {
809 if (!create_wmi_port(name
)){
810 VLOG_ERR("Could not create wmi internal port with name:%s", name
);
816 tnl_cfg
= netdev_get_tunnel_config(netdev
);
817 if (tnl_cfg
&& (tnl_cfg
->dst_port
!= 0 || tnl_cfg
->exts
)) {
818 ofpbuf_use_stack(&options
, options_stub
, sizeof options_stub
);
819 if (tnl_cfg
->dst_port
) {
820 nl_msg_put_u16(&options
, OVS_TUNNEL_ATTR_DST_PORT
,
821 ntohs(tnl_cfg
->dst_port
));
827 ext_ofs
= nl_msg_start_nested(&options
, OVS_TUNNEL_ATTR_EXTENSION
);
828 for (i
= 0; i
< 32; i
++) {
829 if (tnl_cfg
->exts
& (1 << i
)) {
830 nl_msg_put_flag(&options
, i
);
833 nl_msg_end_nested(&options
, ext_ofs
);
835 return dpif_netlink_port_add__(dpif
, name
, ovs_type
, &options
,
838 return dpif_netlink_port_add__(dpif
, name
, ovs_type
, NULL
, port_nop
);
844 dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink
*dpif
,
845 struct netdev
*netdev
,
846 odp_port_t
*port_nop
)
847 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
849 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
850 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
854 error
= dpif_netlink_rtnl_port_create(netdev
);
856 if (error
!= EOPNOTSUPP
) {
857 VLOG_WARN_RL(&rl
, "Failed to create %s with rtnetlink: %s",
858 netdev_get_name(netdev
), ovs_strerror(error
));
863 name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
864 error
= dpif_netlink_port_add__(dpif
, name
, OVS_VPORT_TYPE_NETDEV
, NULL
,
867 dpif_netlink_rtnl_port_destroy(name
, netdev_get_type(netdev
));
873 dpif_netlink_port_add(struct dpif
*dpif_
, struct netdev
*netdev
,
874 odp_port_t
*port_nop
)
876 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
877 int error
= EOPNOTSUPP
;
879 fat_rwlock_wrlock(&dpif
->upcall_lock
);
880 if (!ovs_tunnels_out_of_tree
) {
881 error
= dpif_netlink_rtnl_port_create_and_add(dpif
, netdev
, port_nop
);
884 error
= dpif_netlink_port_add_compat(dpif
, netdev
, port_nop
);
886 fat_rwlock_unlock(&dpif
->upcall_lock
);
892 dpif_netlink_port_del__(struct dpif_netlink
*dpif
, odp_port_t port_no
)
893 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
895 struct dpif_netlink_vport vport
;
896 struct dpif_port dpif_port
;
899 error
= dpif_netlink_port_query__(dpif
, port_no
, NULL
, &dpif_port
);
904 dpif_netlink_vport_init(&vport
);
905 vport
.cmd
= OVS_VPORT_CMD_DEL
;
906 vport
.dp_ifindex
= dpif
->dp_ifindex
;
907 vport
.port_no
= port_no
;
909 if (!strcmp(dpif_port
.type
, "internal")) {
910 if (!delete_wmi_port(dpif_port
.name
)) {
911 VLOG_ERR("Could not delete wmi port with name: %s",
916 error
= dpif_netlink_vport_transact(&vport
, NULL
, NULL
);
918 vport_del_channels(dpif
, port_no
);
920 if (!error
&& !ovs_tunnels_out_of_tree
) {
921 error
= dpif_netlink_rtnl_port_destroy(dpif_port
.name
, dpif_port
.type
);
922 if (error
== EOPNOTSUPP
) {
927 dpif_port_destroy(&dpif_port
);
933 dpif_netlink_port_del(struct dpif
*dpif_
, odp_port_t port_no
)
935 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
938 fat_rwlock_wrlock(&dpif
->upcall_lock
);
939 error
= dpif_netlink_port_del__(dpif
, port_no
);
940 fat_rwlock_unlock(&dpif
->upcall_lock
);
946 dpif_netlink_port_query__(const struct dpif_netlink
*dpif
, odp_port_t port_no
,
947 const char *port_name
, struct dpif_port
*dpif_port
)
949 struct dpif_netlink_vport request
;
950 struct dpif_netlink_vport reply
;
954 dpif_netlink_vport_init(&request
);
955 request
.cmd
= OVS_VPORT_CMD_GET
;
956 request
.dp_ifindex
= dpif
->dp_ifindex
;
957 request
.port_no
= port_no
;
958 request
.name
= port_name
;
960 error
= dpif_netlink_vport_transact(&request
, &reply
, &buf
);
962 if (reply
.dp_ifindex
!= request
.dp_ifindex
) {
963 /* A query by name reported that 'port_name' is in some datapath
964 * other than 'dpif', but the caller wants to know about 'dpif'. */
966 } else if (dpif_port
) {
967 dpif_port
->name
= xstrdup(reply
.name
);
968 dpif_port
->type
= xstrdup(get_vport_type(&reply
));
969 dpif_port
->port_no
= reply
.port_no
;
977 dpif_netlink_port_query_by_number(const struct dpif
*dpif_
, odp_port_t port_no
,
978 struct dpif_port
*dpif_port
)
980 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
982 return dpif_netlink_port_query__(dpif
, port_no
, NULL
, dpif_port
);
/* dpif callback: looks up the port named 'devname' and, on success, fills
 * in '*dpif_port' (which the caller must destroy). */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* A query by name passes 0 as the port number. */
    return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
}
995 dpif_netlink_port_get_pid__(const struct dpif_netlink
*dpif
,
997 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
999 uint32_t port_idx
= odp_to_u32(port_no
);
1002 if (dpif
->handlers
&& dpif
->uc_array_size
> 0) {
1003 /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1004 * channel, since it is not heavily loaded. */
1005 uint32_t idx
= port_idx
>= dpif
->uc_array_size
? 0 : port_idx
;
1007 /* Needs to check in case the socket pointer is changed in between
1008 * the holding of upcall_lock. A known case happens when the main
1009 * thread deletes the vport while the handler thread is handling
1010 * the upcall from that port. */
1011 if (dpif
->channels
[idx
].sock
) {
1012 pid
= nl_sock_pid(dpif
->channels
[idx
].sock
);
1020 dpif_netlink_port_get_pid(const struct dpif
*dpif_
, odp_port_t port_no
)
1022 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1025 fat_rwlock_rdlock(&dpif
->upcall_lock
);
1026 ret
= dpif_netlink_port_get_pid__(dpif
, port_no
);
1027 fat_rwlock_unlock(&dpif
->upcall_lock
);
1033 dpif_netlink_flow_flush(struct dpif
*dpif_
)
1035 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1036 struct dpif_netlink_flow flow
;
1038 dpif_netlink_flow_init(&flow
);
1039 flow
.cmd
= OVS_FLOW_CMD_DEL
;
1040 flow
.dp_ifindex
= dpif
->dp_ifindex
;
1042 if (netdev_is_flow_api_enabled()) {
1043 netdev_ports_flow_flush(dpif_
->dpif_class
);
1046 return dpif_netlink_flow_transact(&flow
, NULL
, NULL
);
1049 struct dpif_netlink_port_state
{
1050 struct nl_dump dump
;
1055 dpif_netlink_port_dump_start__(const struct dpif_netlink
*dpif
,
1056 struct nl_dump
*dump
)
1058 struct dpif_netlink_vport request
;
1061 dpif_netlink_vport_init(&request
);
1062 request
.cmd
= OVS_VPORT_CMD_GET
;
1063 request
.dp_ifindex
= dpif
->dp_ifindex
;
1065 buf
= ofpbuf_new(1024);
1066 dpif_netlink_vport_to_ofpbuf(&request
, buf
);
1067 nl_dump_start(dump
, NETLINK_GENERIC
, buf
);
1072 dpif_netlink_port_dump_start(const struct dpif
*dpif_
, void **statep
)
1074 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1075 struct dpif_netlink_port_state
*state
;
1077 *statep
= state
= xmalloc(sizeof *state
);
1078 dpif_netlink_port_dump_start__(dpif
, &state
->dump
);
1080 ofpbuf_init(&state
->buf
, NL_DUMP_BUFSIZE
);
1085 dpif_netlink_port_dump_next__(const struct dpif_netlink
*dpif
,
1086 struct nl_dump
*dump
,
1087 struct dpif_netlink_vport
*vport
,
1088 struct ofpbuf
*buffer
)
1093 if (!nl_dump_next(dump
, &buf
, buffer
)) {
1097 error
= dpif_netlink_vport_from_ofpbuf(vport
, &buf
);
1099 VLOG_WARN_RL(&error_rl
, "%s: failed to parse vport record (%s)",
1100 dpif_name(&dpif
->dpif
), ovs_strerror(error
));
1106 dpif_netlink_port_dump_next(const struct dpif
*dpif_
, void *state_
,
1107 struct dpif_port
*dpif_port
)
1109 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1110 struct dpif_netlink_port_state
*state
= state_
;
1111 struct dpif_netlink_vport vport
;
1114 error
= dpif_netlink_port_dump_next__(dpif
, &state
->dump
, &vport
,
1119 dpif_port
->name
= CONST_CAST(char *, vport
.name
);
1120 dpif_port
->type
= CONST_CAST(char *, get_vport_type(&vport
));
1121 dpif_port
->port_no
= vport
.port_no
;
1126 dpif_netlink_port_dump_done(const struct dpif
*dpif_ OVS_UNUSED
, void *state_
)
1128 struct dpif_netlink_port_state
*state
= state_
;
1129 int error
= nl_dump_done(&state
->dump
);
1131 ofpbuf_uninit(&state
->buf
);
1137 dpif_netlink_port_poll(const struct dpif
*dpif_
, char **devnamep
)
1139 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1141 /* Lazily create the Netlink socket to listen for notifications. */
1142 if (!dpif
->port_notifier
) {
1143 struct nl_sock
*sock
;
1146 error
= nl_sock_create(NETLINK_GENERIC
, &sock
);
1151 error
= nl_sock_join_mcgroup(sock
, ovs_vport_mcgroup
);
1153 nl_sock_destroy(sock
);
1156 dpif
->port_notifier
= sock
;
1158 /* We have no idea of the current state so report that everything
1164 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1165 uint64_t buf_stub
[4096 / 8];
1169 ofpbuf_use_stub(&buf
, buf_stub
, sizeof buf_stub
);
1170 error
= nl_sock_recv(dpif
->port_notifier
, &buf
, NULL
, false);
1172 struct dpif_netlink_vport vport
;
1174 error
= dpif_netlink_vport_from_ofpbuf(&vport
, &buf
);
1176 if (vport
.dp_ifindex
== dpif
->dp_ifindex
1177 && (vport
.cmd
== OVS_VPORT_CMD_NEW
1178 || vport
.cmd
== OVS_VPORT_CMD_DEL
1179 || vport
.cmd
== OVS_VPORT_CMD_SET
)) {
1180 VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8
,
1181 dpif
->dpif
.full_name
, vport
.name
, vport
.cmd
);
1182 if (vport
.cmd
== OVS_VPORT_CMD_DEL
&& dpif
->handlers
) {
1183 dpif
->refresh_channels
= true;
1185 *devnamep
= xstrdup(vport
.name
);
1186 ofpbuf_uninit(&buf
);
1190 } else if (error
!= EAGAIN
) {
1191 VLOG_WARN_RL(&rl
, "error reading or parsing netlink (%s)",
1192 ovs_strerror(error
));
1193 nl_sock_drain(dpif
->port_notifier
);
1197 ofpbuf_uninit(&buf
);
1205 dpif_netlink_port_poll_wait(const struct dpif
*dpif_
)
1207 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1209 if (dpif
->port_notifier
) {
1210 nl_sock_wait(dpif
->port_notifier
, POLLIN
);
1212 poll_immediate_wake();
1217 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow
*request
,
1218 const ovs_u128
*ufid
, bool terse
)
1221 request
->ufid
= *ufid
;
1222 request
->ufid_present
= true;
1224 request
->ufid_present
= false;
1226 request
->ufid_terse
= terse
;
1230 dpif_netlink_init_flow_get__(const struct dpif_netlink
*dpif
,
1231 const struct nlattr
*key
, size_t key_len
,
1232 const ovs_u128
*ufid
, bool terse
,
1233 struct dpif_netlink_flow
*request
)
1235 dpif_netlink_flow_init(request
);
1236 request
->cmd
= OVS_FLOW_CMD_GET
;
1237 request
->dp_ifindex
= dpif
->dp_ifindex
;
1239 request
->key_len
= key_len
;
1240 dpif_netlink_flow_init_ufid(request
, ufid
, terse
);
1244 dpif_netlink_init_flow_get(const struct dpif_netlink
*dpif
,
1245 const struct dpif_flow_get
*get
,
1246 struct dpif_netlink_flow
*request
)
1248 dpif_netlink_init_flow_get__(dpif
, get
->key
, get
->key_len
, get
->ufid
,
1253 dpif_netlink_flow_get__(const struct dpif_netlink
*dpif
,
1254 const struct nlattr
*key
, size_t key_len
,
1255 const ovs_u128
*ufid
, bool terse
,
1256 struct dpif_netlink_flow
*reply
, struct ofpbuf
**bufp
)
1258 struct dpif_netlink_flow request
;
1260 dpif_netlink_init_flow_get__(dpif
, key
, key_len
, ufid
, terse
, &request
);
1261 return dpif_netlink_flow_transact(&request
, reply
, bufp
);
1265 dpif_netlink_flow_get(const struct dpif_netlink
*dpif
,
1266 const struct dpif_netlink_flow
*flow
,
1267 struct dpif_netlink_flow
*reply
, struct ofpbuf
**bufp
)
1269 return dpif_netlink_flow_get__(dpif
, flow
->key
, flow
->key_len
,
1270 flow
->ufid_present
? &flow
->ufid
: NULL
,
1271 false, reply
, bufp
);
1275 dpif_netlink_init_flow_put(struct dpif_netlink
*dpif
,
1276 const struct dpif_flow_put
*put
,
1277 struct dpif_netlink_flow
*request
)
1279 static const struct nlattr dummy_action
;
1281 dpif_netlink_flow_init(request
);
1282 request
->cmd
= (put
->flags
& DPIF_FP_CREATE
1283 ? OVS_FLOW_CMD_NEW
: OVS_FLOW_CMD_SET
);
1284 request
->dp_ifindex
= dpif
->dp_ifindex
;
1285 request
->key
= put
->key
;
1286 request
->key_len
= put
->key_len
;
1287 request
->mask
= put
->mask
;
1288 request
->mask_len
= put
->mask_len
;
1289 dpif_netlink_flow_init_ufid(request
, put
->ufid
, false);
1291 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1292 request
->actions
= (put
->actions
1294 : CONST_CAST(struct nlattr
*, &dummy_action
));
1295 request
->actions_len
= put
->actions_len
;
1296 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
1297 request
->clear
= true;
1299 if (put
->flags
& DPIF_FP_PROBE
) {
1300 request
->probe
= true;
1302 request
->nlmsg_flags
= put
->flags
& DPIF_FP_MODIFY
? 0 : NLM_F_CREATE
;
1306 dpif_netlink_init_flow_del__(struct dpif_netlink
*dpif
,
1307 const struct nlattr
*key
, size_t key_len
,
1308 const ovs_u128
*ufid
, bool terse
,
1309 struct dpif_netlink_flow
*request
)
1311 dpif_netlink_flow_init(request
);
1312 request
->cmd
= OVS_FLOW_CMD_DEL
;
1313 request
->dp_ifindex
= dpif
->dp_ifindex
;
1315 request
->key_len
= key_len
;
1316 dpif_netlink_flow_init_ufid(request
, ufid
, terse
);
1320 dpif_netlink_init_flow_del(struct dpif_netlink
*dpif
,
1321 const struct dpif_flow_del
*del
,
1322 struct dpif_netlink_flow
*request
)
1324 dpif_netlink_init_flow_del__(dpif
, del
->key
, del
->key_len
,
1325 del
->ufid
, del
->terse
, request
);
1328 struct dpif_netlink_flow_dump
{
1329 struct dpif_flow_dump up
;
1330 struct nl_dump nl_dump
;
1332 struct netdev_flow_dump
**netdev_dumps
;
1333 int netdev_dumps_num
; /* Number of netdev_flow_dumps */
1334 struct ovs_mutex netdev_lock
; /* Guards the following. */
1335 int netdev_current_dump OVS_GUARDED
; /* Shared current dump */
1336 struct dpif_flow_dump_types types
; /* Type of dump */
1339 static struct dpif_netlink_flow_dump
*
1340 dpif_netlink_flow_dump_cast(struct dpif_flow_dump
*dump
)
1342 return CONTAINER_OF(dump
, struct dpif_netlink_flow_dump
, up
);
1346 start_netdev_dump(const struct dpif
*dpif_
,
1347 struct dpif_netlink_flow_dump
*dump
)
1349 ovs_mutex_init(&dump
->netdev_lock
);
1351 if (!(dump
->types
.netdev_flows
)) {
1352 dump
->netdev_dumps_num
= 0;
1353 dump
->netdev_dumps
= NULL
;
1357 ovs_mutex_lock(&dump
->netdev_lock
);
1358 dump
->netdev_current_dump
= 0;
1360 = netdev_ports_flow_dump_create(dpif_
->dpif_class
,
1361 &dump
->netdev_dumps_num
);
1362 ovs_mutex_unlock(&dump
->netdev_lock
);
1366 dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump
*dump
,
1367 struct dpif_flow_dump_types
*types
)
1370 dump
->types
.ovs_flows
= true;
1371 dump
->types
.netdev_flows
= true;
1373 memcpy(&dump
->types
, types
, sizeof *types
);
1377 static struct dpif_flow_dump
*
1378 dpif_netlink_flow_dump_create(const struct dpif
*dpif_
, bool terse
,
1379 struct dpif_flow_dump_types
*types
)
1381 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1382 struct dpif_netlink_flow_dump
*dump
;
1383 struct dpif_netlink_flow request
;
1386 dump
= xmalloc(sizeof *dump
);
1387 dpif_flow_dump_init(&dump
->up
, dpif_
);
1389 dpif_netlink_populate_flow_dump_types(dump
, types
);
1391 if (dump
->types
.ovs_flows
) {
1392 dpif_netlink_flow_init(&request
);
1393 request
.cmd
= OVS_FLOW_CMD_GET
;
1394 request
.dp_ifindex
= dpif
->dp_ifindex
;
1395 request
.ufid_present
= false;
1396 request
.ufid_terse
= terse
;
1398 buf
= ofpbuf_new(1024);
1399 dpif_netlink_flow_to_ofpbuf(&request
, buf
);
1400 nl_dump_start(&dump
->nl_dump
, NETLINK_GENERIC
, buf
);
1403 atomic_init(&dump
->status
, 0);
1404 dump
->up
.terse
= terse
;
1406 start_netdev_dump(dpif_
, dump
);
1412 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
1414 struct dpif_netlink_flow_dump
*dump
= dpif_netlink_flow_dump_cast(dump_
);
1415 unsigned int nl_status
= 0;
1418 if (dump
->types
.ovs_flows
) {
1419 nl_status
= nl_dump_done(&dump
->nl_dump
);
1422 for (int i
= 0; i
< dump
->netdev_dumps_num
; i
++) {
1423 int err
= netdev_flow_dump_destroy(dump
->netdev_dumps
[i
]);
1425 if (err
!= 0 && err
!= EOPNOTSUPP
) {
1426 VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err
));
1430 free(dump
->netdev_dumps
);
1431 ovs_mutex_destroy(&dump
->netdev_lock
);
1433 /* No other thread has access to 'dump' at this point. */
1434 atomic_read_relaxed(&dump
->status
, &dump_status
);
1436 return dump_status
? dump_status
: nl_status
;
1439 struct dpif_netlink_flow_dump_thread
{
1440 struct dpif_flow_dump_thread up
;
1441 struct dpif_netlink_flow_dump
*dump
;
1442 struct dpif_netlink_flow flow
;
1443 struct dpif_flow_stats stats
;
1444 struct ofpbuf nl_flows
; /* Always used to store flows. */
1445 struct ofpbuf
*nl_actions
; /* Used if kernel does not supply actions. */
1446 int netdev_dump_idx
; /* This thread current netdev dump index */
1447 bool netdev_done
; /* If we are finished dumping netdevs */
1449 /* (Key/Mask/Actions) Buffers for netdev dumping */
1450 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
1451 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
1452 struct odputil_keybuf actbuf
[FLOW_DUMP_MAX_BATCH
];
1455 static struct dpif_netlink_flow_dump_thread
*
1456 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
1458 return CONTAINER_OF(thread
, struct dpif_netlink_flow_dump_thread
, up
);
1461 static struct dpif_flow_dump_thread
*
1462 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
1464 struct dpif_netlink_flow_dump
*dump
= dpif_netlink_flow_dump_cast(dump_
);
1465 struct dpif_netlink_flow_dump_thread
*thread
;
1467 thread
= xmalloc(sizeof *thread
);
1468 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
1469 thread
->dump
= dump
;
1470 ofpbuf_init(&thread
->nl_flows
, NL_DUMP_BUFSIZE
);
1471 thread
->nl_actions
= NULL
;
1472 thread
->netdev_dump_idx
= 0;
1473 thread
->netdev_done
= !(thread
->netdev_dump_idx
< dump
->netdev_dumps_num
);
1479 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
1481 struct dpif_netlink_flow_dump_thread
*thread
1482 = dpif_netlink_flow_dump_thread_cast(thread_
);
1484 ofpbuf_uninit(&thread
->nl_flows
);
1485 ofpbuf_delete(thread
->nl_actions
);
1490 dpif_netlink_flow_to_dpif_flow(struct dpif
*dpif
, struct dpif_flow
*dpif_flow
,
1491 const struct dpif_netlink_flow
*datapath_flow
)
1493 dpif_flow
->key
= datapath_flow
->key
;
1494 dpif_flow
->key_len
= datapath_flow
->key_len
;
1495 dpif_flow
->mask
= datapath_flow
->mask
;
1496 dpif_flow
->mask_len
= datapath_flow
->mask_len
;
1497 dpif_flow
->actions
= datapath_flow
->actions
;
1498 dpif_flow
->actions_len
= datapath_flow
->actions_len
;
1499 dpif_flow
->ufid_present
= datapath_flow
->ufid_present
;
1500 dpif_flow
->pmd_id
= PMD_ID_NULL
;
1501 if (datapath_flow
->ufid_present
) {
1502 dpif_flow
->ufid
= datapath_flow
->ufid
;
1504 ovs_assert(datapath_flow
->key
&& datapath_flow
->key_len
);
1505 dpif_flow_hash(dpif
, datapath_flow
->key
, datapath_flow
->key_len
,
1508 dpif_netlink_flow_get_stats(datapath_flow
, &dpif_flow
->stats
);
1509 dpif_flow
->attrs
.offloaded
= false;
1510 dpif_flow
->attrs
.dp_layer
= "ovs";
1513 /* The design is such that all threads are working together on the first dump
1514 * to the last, in order (at first they all on dump 0).
1515 * When the first thread finds that the given dump is finished,
1516 * they all move to the next. If two or more threads find the same dump
1517 * is finished at the same time, the first one will advance the shared
1518 * netdev_current_dump and the others will catch up. */
1520 dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread
*thread
)
1522 struct dpif_netlink_flow_dump
*dump
= thread
->dump
;
1524 ovs_mutex_lock(&dump
->netdev_lock
);
1525 /* if we haven't finished (dumped everything) */
1526 if (dump
->netdev_current_dump
< dump
->netdev_dumps_num
) {
1527 /* if we are the first to find that current dump is finished
1529 if (thread
->netdev_dump_idx
== dump
->netdev_current_dump
) {
1530 thread
->netdev_dump_idx
= ++dump
->netdev_current_dump
;
1531 /* did we just finish the last dump? done. */
1532 if (dump
->netdev_current_dump
== dump
->netdev_dumps_num
) {
1533 thread
->netdev_done
= true;
1536 /* otherwise, we are behind, catch up */
1537 thread
->netdev_dump_idx
= dump
->netdev_current_dump
;
1540 /* some other thread finished */
1541 thread
->netdev_done
= true;
1543 ovs_mutex_unlock(&dump
->netdev_lock
);
1547 dpif_netlink_netdev_match_to_dpif_flow(struct match
*match
,
1548 struct ofpbuf
*key_buf
,
1549 struct ofpbuf
*mask_buf
,
1550 struct nlattr
*actions
,
1551 struct dpif_flow_stats
*stats
,
1552 struct dpif_flow_attrs
*attrs
,
1554 struct dpif_flow
*flow
,
1555 bool terse OVS_UNUSED
)
1558 struct odp_flow_key_parms odp_parms
= {
1559 .flow
= &match
->flow
,
1560 .mask
= &match
->wc
.masks
,
1562 .max_vlan_headers
= 2,
1567 memset(flow
, 0, sizeof *flow
);
1570 offset
= key_buf
->size
;
1571 flow
->key
= ofpbuf_tail(key_buf
);
1572 odp_flow_key_from_flow(&odp_parms
, key_buf
);
1573 flow
->key_len
= key_buf
->size
- offset
;
1576 offset
= mask_buf
->size
;
1577 flow
->mask
= ofpbuf_tail(mask_buf
);
1578 odp_parms
.key_buf
= key_buf
;
1579 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
1580 flow
->mask_len
= mask_buf
->size
- offset
;
1583 flow
->actions
= nl_attr_get(actions
);
1584 flow
->actions_len
= nl_attr_get_size(actions
);
1587 memcpy(&flow
->stats
, stats
, sizeof *stats
);
1590 flow
->ufid_present
= true;
1593 flow
->pmd_id
= PMD_ID_NULL
;
1595 memcpy(&flow
->attrs
, attrs
, sizeof *attrs
);
1601 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
1602 struct dpif_flow
*flows
, int max_flows
)
1604 struct dpif_netlink_flow_dump_thread
*thread
1605 = dpif_netlink_flow_dump_thread_cast(thread_
);
1606 struct dpif_netlink_flow_dump
*dump
= thread
->dump
;
1607 struct dpif_netlink
*dpif
= dpif_netlink_cast(thread
->up
.dpif
);
1610 ofpbuf_delete(thread
->nl_actions
);
1611 thread
->nl_actions
= NULL
;
1614 max_flows
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
1616 while (!thread
->netdev_done
&& n_flows
< max_flows
) {
1617 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[n_flows
];
1618 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[n_flows
];
1619 struct odputil_keybuf
*actbuf
= &thread
->actbuf
[n_flows
];
1620 struct ofpbuf key
, mask
, act
;
1621 struct dpif_flow
*f
= &flows
[n_flows
];
1622 int cur
= thread
->netdev_dump_idx
;
1623 struct netdev_flow_dump
*netdev_dump
= dump
->netdev_dumps
[cur
];
1625 struct nlattr
*actions
;
1626 struct dpif_flow_stats stats
;
1627 struct dpif_flow_attrs attrs
;
1631 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
1632 ofpbuf_use_stack(&act
, actbuf
, sizeof *actbuf
);
1633 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
1634 has_next
= netdev_flow_dump_next(netdev_dump
, &match
,
1635 &actions
, &stats
, &attrs
,
1640 dpif_netlink_netdev_match_to_dpif_flow(&match
,
1650 dpif_netlink_advance_netdev_dump(thread
);
1654 if (!(dump
->types
.ovs_flows
)) {
1659 || (n_flows
< max_flows
&& thread
->nl_flows
.size
)) {
1660 struct dpif_netlink_flow datapath_flow
;
1661 struct ofpbuf nl_flow
;
1664 /* Try to grab another flow. */
1665 if (!nl_dump_next(&dump
->nl_dump
, &nl_flow
, &thread
->nl_flows
)) {
1669 /* Convert the flow to our output format. */
1670 error
= dpif_netlink_flow_from_ofpbuf(&datapath_flow
, &nl_flow
);
1672 atomic_store_relaxed(&dump
->status
, error
);
1676 if (dump
->up
.terse
|| datapath_flow
.actions
) {
1677 /* Common case: we don't want actions, or the flow includes
1679 dpif_netlink_flow_to_dpif_flow(&dpif
->dpif
, &flows
[n_flows
++],
1682 /* Rare case: the flow does not include actions. Retrieve this
1683 * individual flow again to get the actions. */
1684 error
= dpif_netlink_flow_get(dpif
, &datapath_flow
,
1685 &datapath_flow
, &thread
->nl_actions
);
1686 if (error
== ENOENT
) {
1687 VLOG_DBG("dumped flow disappeared on get");
1690 VLOG_WARN("error fetching dumped flow: %s",
1691 ovs_strerror(error
));
1692 atomic_store_relaxed(&dump
->status
, error
);
1696 /* Save this flow. Then exit, because we only have one buffer to
1697 * handle this case. */
1698 dpif_netlink_flow_to_dpif_flow(&dpif
->dpif
, &flows
[n_flows
++],
1707 dpif_netlink_encode_execute(int dp_ifindex
, const struct dpif_execute
*d_exec
,
1710 struct ovs_header
*k_exec
;
1713 ofpbuf_prealloc_tailroom(buf
, (64
1714 + dp_packet_size(d_exec
->packet
)
1715 + ODP_KEY_METADATA_SIZE
1716 + d_exec
->actions_len
));
1718 nl_msg_put_genlmsghdr(buf
, 0, ovs_packet_family
, NLM_F_REQUEST
,
1719 OVS_PACKET_CMD_EXECUTE
, OVS_PACKET_VERSION
);
1721 k_exec
= ofpbuf_put_uninit(buf
, sizeof *k_exec
);
1722 k_exec
->dp_ifindex
= dp_ifindex
;
1724 nl_msg_put_unspec(buf
, OVS_PACKET_ATTR_PACKET
,
1725 dp_packet_data(d_exec
->packet
),
1726 dp_packet_size(d_exec
->packet
));
1728 key_ofs
= nl_msg_start_nested(buf
, OVS_PACKET_ATTR_KEY
);
1729 odp_key_from_dp_packet(buf
, d_exec
->packet
);
1730 nl_msg_end_nested(buf
, key_ofs
);
1732 nl_msg_put_unspec(buf
, OVS_PACKET_ATTR_ACTIONS
,
1733 d_exec
->actions
, d_exec
->actions_len
);
1734 if (d_exec
->probe
) {
1735 nl_msg_put_flag(buf
, OVS_PACKET_ATTR_PROBE
);
1738 nl_msg_put_u16(buf
, OVS_PACKET_ATTR_MRU
, d_exec
->mtu
);
1742 /* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
1743 * Returns the number actually executed (at least 1, if 'n_ops' is
1746 dpif_netlink_operate__(struct dpif_netlink
*dpif
,
1747 struct dpif_op
**ops
, size_t n_ops
)
1750 struct nl_transaction txn
;
1752 struct ofpbuf request
;
1753 uint64_t request_stub
[1024 / 8];
1755 struct ofpbuf reply
;
1756 uint64_t reply_stub
[1024 / 8];
1757 } auxes
[OPERATE_MAX_OPS
];
1759 struct nl_transaction
*txnsp
[OPERATE_MAX_OPS
];
1762 n_ops
= MIN(n_ops
, OPERATE_MAX_OPS
);
1763 for (i
= 0; i
< n_ops
; i
++) {
1764 struct op_auxdata
*aux
= &auxes
[i
];
1765 struct dpif_op
*op
= ops
[i
];
1766 struct dpif_flow_put
*put
;
1767 struct dpif_flow_del
*del
;
1768 struct dpif_flow_get
*get
;
1769 struct dpif_netlink_flow flow
;
1771 ofpbuf_use_stub(&aux
->request
,
1772 aux
->request_stub
, sizeof aux
->request_stub
);
1773 aux
->txn
.request
= &aux
->request
;
1775 ofpbuf_use_stub(&aux
->reply
, aux
->reply_stub
, sizeof aux
->reply_stub
);
1776 aux
->txn
.reply
= NULL
;
1779 case DPIF_OP_FLOW_PUT
:
1780 put
= &op
->flow_put
;
1781 dpif_netlink_init_flow_put(dpif
, put
, &flow
);
1783 flow
.nlmsg_flags
|= NLM_F_ECHO
;
1784 aux
->txn
.reply
= &aux
->reply
;
1786 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1789 case DPIF_OP_FLOW_DEL
:
1790 del
= &op
->flow_del
;
1791 dpif_netlink_init_flow_del(dpif
, del
, &flow
);
1793 flow
.nlmsg_flags
|= NLM_F_ECHO
;
1794 aux
->txn
.reply
= &aux
->reply
;
1796 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1799 case DPIF_OP_EXECUTE
:
1800 /* Can't execute a packet that won't fit in a Netlink attribute. */
1801 if (OVS_UNLIKELY(nl_attr_oversized(
1802 dp_packet_size(op
->execute
.packet
)))) {
1803 /* Report an error immediately if this is the first operation.
1804 * Otherwise the easiest thing to do is to postpone to the next
1805 * call (when this will be the first operation). */
1807 VLOG_ERR_RL(&error_rl
,
1808 "dropping oversized %"PRIu32
"-byte packet",
1809 dp_packet_size(op
->execute
.packet
));
1810 op
->error
= ENOBUFS
;
1815 dpif_netlink_encode_execute(dpif
->dp_ifindex
, &op
->execute
,
1820 case DPIF_OP_FLOW_GET
:
1821 get
= &op
->flow_get
;
1822 dpif_netlink_init_flow_get(dpif
, get
, &flow
);
1823 aux
->txn
.reply
= get
->buffer
;
1824 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1832 for (i
= 0; i
< n_ops
; i
++) {
1833 txnsp
[i
] = &auxes
[i
].txn
;
1835 nl_transact_multiple(NETLINK_GENERIC
, txnsp
, n_ops
);
1837 for (i
= 0; i
< n_ops
; i
++) {
1838 struct op_auxdata
*aux
= &auxes
[i
];
1839 struct nl_transaction
*txn
= &auxes
[i
].txn
;
1840 struct dpif_op
*op
= ops
[i
];
1841 struct dpif_flow_put
*put
;
1842 struct dpif_flow_del
*del
;
1843 struct dpif_flow_get
*get
;
1845 op
->error
= txn
->error
;
1848 case DPIF_OP_FLOW_PUT
:
1849 put
= &op
->flow_put
;
1852 struct dpif_netlink_flow reply
;
1854 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
,
1857 dpif_netlink_flow_get_stats(&reply
, put
->stats
);
1863 case DPIF_OP_FLOW_DEL
:
1864 del
= &op
->flow_del
;
1867 struct dpif_netlink_flow reply
;
1869 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
,
1872 dpif_netlink_flow_get_stats(&reply
, del
->stats
);
1878 case DPIF_OP_EXECUTE
:
1881 case DPIF_OP_FLOW_GET
:
1882 get
= &op
->flow_get
;
1884 struct dpif_netlink_flow reply
;
1886 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
, txn
->reply
);
1888 dpif_netlink_flow_to_dpif_flow(&dpif
->dpif
, get
->flow
,
1898 ofpbuf_uninit(&aux
->request
);
1899 ofpbuf_uninit(&aux
->reply
);
1906 parse_flow_get(struct dpif_netlink
*dpif
, struct dpif_flow_get
*get
)
1908 struct dpif_flow
*dpif_flow
= get
->flow
;
1910 struct nlattr
*actions
;
1911 struct dpif_flow_stats stats
;
1912 struct dpif_flow_attrs attrs
;
1914 uint64_t act_buf
[1024 / 8];
1915 struct odputil_keybuf maskbuf
;
1916 struct odputil_keybuf keybuf
;
1917 struct odputil_keybuf actbuf
;
1918 struct ofpbuf key
, mask
, act
;
1921 ofpbuf_use_stack(&buf
, &act_buf
, sizeof act_buf
);
1922 err
= netdev_ports_flow_get(dpif
->dpif
.dpif_class
, &match
,
1923 &actions
, get
->ufid
, &stats
, &attrs
, &buf
);
1928 VLOG_DBG("found flow from netdev, translating to dpif flow");
1930 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
1931 ofpbuf_use_stack(&act
, &actbuf
, sizeof actbuf
);
1932 ofpbuf_use_stack(&mask
, &maskbuf
, sizeof maskbuf
);
1933 dpif_netlink_netdev_match_to_dpif_flow(&match
, &key
, &mask
, actions
,
1935 (ovs_u128
*) get
->ufid
,
1938 ofpbuf_put(get
->buffer
, nl_attr_get(actions
), nl_attr_get_size(actions
));
1939 dpif_flow
->actions
= ofpbuf_at(get
->buffer
, 0, 0);
1940 dpif_flow
->actions_len
= nl_attr_get_size(actions
);
1946 parse_flow_put(struct dpif_netlink
*dpif
, struct dpif_flow_put
*put
)
1948 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
1949 const struct dpif_class
*dpif_class
= dpif
->dpif
.dpif_class
;
1952 const struct nlattr
*nla
;
1955 struct offload_info info
;
1956 ovs_be16 dst_port
= 0;
1957 uint8_t csum_on
= false;
1960 if (put
->flags
& DPIF_FP_PROBE
) {
1964 err
= parse_key_and_mask_to_match(put
->key
, put
->key_len
, put
->mask
,
1965 put
->mask_len
, &match
);
1970 /* When we try to install a dummy flow from a probed feature. */
1971 if (match
.flow
.dl_type
== htons(0x1234)) {
1975 in_port
= match
.flow
.in_port
.odp_port
;
1976 dev
= netdev_ports_get(in_port
, dpif_class
);
1981 /* Get tunnel dst port */
1982 NL_ATTR_FOR_EACH(nla
, left
, put
->actions
, put
->actions_len
) {
1983 if (nl_attr_type(nla
) == OVS_ACTION_ATTR_OUTPUT
) {
1984 const struct netdev_tunnel_config
*tnl_cfg
;
1985 struct netdev
*outdev
;
1986 odp_port_t out_port
;
1988 out_port
= nl_attr_get_odp_port(nla
);
1989 outdev
= netdev_ports_get(out_port
, dpif_class
);
1994 tnl_cfg
= netdev_get_tunnel_config(outdev
);
1995 if (tnl_cfg
&& tnl_cfg
->dst_port
!= 0) {
1996 dst_port
= tnl_cfg
->dst_port
;
1999 csum_on
= tnl_cfg
->csum
;
2001 netdev_close(outdev
);
2005 info
.dpif_class
= dpif_class
;
2006 info
.tp_dst_port
= dst_port
;
2007 info
.tunnel_csum_on
= csum_on
;
2008 err
= netdev_flow_put(dev
, &match
,
2009 CONST_CAST(struct nlattr
*, put
->actions
),
2011 CONST_CAST(ovs_u128
*, put
->ufid
),
2015 if (put
->flags
& DPIF_FP_MODIFY
) {
2016 struct dpif_op
*opp
;
2019 op
.type
= DPIF_OP_FLOW_DEL
;
2020 op
.flow_del
.key
= put
->key
;
2021 op
.flow_del
.key_len
= put
->key_len
;
2022 op
.flow_del
.ufid
= put
->ufid
;
2023 op
.flow_del
.pmd_id
= put
->pmd_id
;
2024 op
.flow_del
.stats
= NULL
;
2025 op
.flow_del
.terse
= false;
2028 dpif_netlink_operate__(dpif
, &opp
, 1);
2031 VLOG_DBG("added flow");
2032 } else if (err
!= EEXIST
) {
2033 struct netdev
*oor_netdev
= NULL
;
2034 if (err
== ENOSPC
&& netdev_is_offload_rebalance_policy_enabled()) {
2036 * We need to set OOR on the input netdev (i.e, 'dev') for the
2037 * flow. But if the flow has a tunnel attribute (i.e, decap action,
2038 * with a virtual device like a VxLAN interface as its in-port),
2039 * then lookup and set OOR on the underlying tunnel (real) netdev.
2041 oor_netdev
= flow_get_tunnel_netdev(&match
.flow
.tunnel
);
2043 /* Not a 'tunnel' flow */
2046 netdev_set_hw_info(oor_netdev
, HW_INFO_TYPE_OOR
, true);
2048 VLOG_ERR_RL(&rl
, "failed to offload flow: %s: %s", ovs_strerror(err
),
2049 (oor_netdev
? oor_netdev
->name
: dev
->name
));
2053 if (err
&& err
!= EEXIST
&& (put
->flags
& DPIF_FP_MODIFY
)) {
2054 /* Modified rule can't be offloaded, try and delete from HW */
2055 int del_err
= netdev_flow_del(dev
, put
->ufid
, put
->stats
);
2058 /* Delete from hw success, so old flow was offloaded.
2059 * Change flags to create the flow in kernel */
2060 put
->flags
&= ~DPIF_FP_MODIFY
;
2061 put
->flags
|= DPIF_FP_CREATE
;
2062 } else if (del_err
!= ENOENT
) {
2063 VLOG_ERR_RL(&rl
, "failed to delete offloaded flow: %s",
2064 ovs_strerror(del_err
));
2065 /* stop proccesing the flow in kernel */
2076 try_send_to_netdev(struct dpif_netlink
*dpif
, struct dpif_op
*op
)
2078 int err
= EOPNOTSUPP
;
2081 case DPIF_OP_FLOW_PUT
: {
2082 struct dpif_flow_put
*put
= &op
->flow_put
;
2088 log_flow_put_message(&dpif
->dpif
, &this_module
, put
, 0);
2089 err
= parse_flow_put(dpif
, put
);
2092 case DPIF_OP_FLOW_DEL
: {
2093 struct dpif_flow_del
*del
= &op
->flow_del
;
2099 log_flow_del_message(&dpif
->dpif
, &this_module
, del
, 0);
2100 err
= netdev_ports_flow_del(dpif
->dpif
.dpif_class
, del
->ufid
,
2104 case DPIF_OP_FLOW_GET
: {
2105 struct dpif_flow_get
*get
= &op
->flow_get
;
2107 if (!op
->flow_get
.ufid
) {
2111 log_flow_get_message(&dpif
->dpif
, &this_module
, get
, 0);
2112 err
= parse_flow_get(dpif
, get
);
2115 case DPIF_OP_EXECUTE
:
2124 dpif_netlink_operate_chunks(struct dpif_netlink
*dpif
, struct dpif_op
**ops
,
2128 size_t chunk
= dpif_netlink_operate__(dpif
, ops
, n_ops
);
2136 dpif_netlink_operate(struct dpif
*dpif_
, struct dpif_op
**ops
, size_t n_ops
)
2138 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2139 struct dpif_op
*new_ops
[OPERATE_MAX_OPS
];
2144 if (netdev_is_flow_api_enabled()) {
2148 while (n_ops
> 0 && count
< OPERATE_MAX_OPS
) {
2149 struct dpif_op
*op
= ops
[i
++];
2151 err
= try_send_to_netdev(dpif
, op
);
2152 if (err
&& err
!= EEXIST
) {
2153 new_ops
[count
++] = op
;
2161 dpif_netlink_operate_chunks(dpif
, new_ops
, count
);
2164 dpif_netlink_operate_chunks(dpif
, ops
, n_ops
);
2170 dpif_netlink_handler_uninit(struct dpif_handler
*handler
)
2172 vport_delete_sock_pool(handler
);
2176 dpif_netlink_handler_init(struct dpif_handler
*handler
)
2178 return vport_create_sock_pool(handler
);
2183 dpif_netlink_handler_init(struct dpif_handler
*handler
)
2185 handler
->epoll_fd
= epoll_create(10);
2186 return handler
->epoll_fd
< 0 ? errno
: 0;
2190 dpif_netlink_handler_uninit(struct dpif_handler
*handler
)
2192 close(handler
->epoll_fd
);
2196 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
2197 * currently in 'dpif' in the kernel, by adding a new set of channels for
2198 * any kernel vport that lacks one and deleting any channels that have no
2199 * backing kernel vports. */
2201 dpif_netlink_refresh_channels(struct dpif_netlink
*dpif
, uint32_t n_handlers
)
2202 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2204 unsigned long int *keep_channels
;
2205 struct dpif_netlink_vport vport
;
2206 size_t keep_channels_nbits
;
2207 struct nl_dump dump
;
2208 uint64_t reply_stub
[NL_DUMP_BUFSIZE
/ 8];
2213 ovs_assert(!WINDOWS
|| n_handlers
<= 1);
2214 ovs_assert(!WINDOWS
|| dpif
->n_handlers
<= 1);
2216 if (dpif
->n_handlers
!= n_handlers
) {
2217 destroy_all_channels(dpif
);
2218 dpif
->handlers
= xzalloc(n_handlers
* sizeof *dpif
->handlers
);
2219 for (i
= 0; i
< n_handlers
; i
++) {
2221 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
2223 error
= dpif_netlink_handler_init(handler
);
2227 for (j
= 0; j
< i
; j
++) {
2228 struct dpif_handler
*tmp
= &dpif
->handlers
[j
];
2229 dpif_netlink_handler_uninit(tmp
);
2231 free(dpif
->handlers
);
2232 dpif
->handlers
= NULL
;
2237 dpif
->n_handlers
= n_handlers
;
2240 for (i
= 0; i
< n_handlers
; i
++) {
2241 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
2243 handler
->event_offset
= handler
->n_events
= 0;
2246 keep_channels_nbits
= dpif
->uc_array_size
;
2247 keep_channels
= bitmap_allocate(keep_channels_nbits
);
2249 ofpbuf_use_stub(&buf
, reply_stub
, sizeof reply_stub
);
2250 dpif_netlink_port_dump_start__(dpif
, &dump
);
2251 while (!dpif_netlink_port_dump_next__(dpif
, &dump
, &vport
, &buf
)) {
2252 uint32_t port_no
= odp_to_u32(vport
.port_no
);
2253 uint32_t upcall_pid
;
2256 if (port_no
>= dpif
->uc_array_size
2257 || !vport_get_pid(dpif
, port_no
, &upcall_pid
)) {
2258 struct nl_sock
*socksp
;
2260 if (nl_sock_create(NETLINK_GENERIC
, &socksp
)) {
2264 error
= vport_add_channel(dpif
, vport
.port_no
, socksp
);
2266 VLOG_INFO("%s: could not add channels for port %s",
2267 dpif_name(&dpif
->dpif
), vport
.name
);
2268 nl_sock_destroy(socksp
);
2272 upcall_pid
= nl_sock_pid(socksp
);
2275 /* Configure the vport to deliver misses to 'sock'. */
2276 if (vport
.upcall_pids
[0] == 0
2277 || vport
.n_upcall_pids
!= 1
2278 || upcall_pid
!= vport
.upcall_pids
[0]) {
2279 struct dpif_netlink_vport vport_request
;
2281 dpif_netlink_vport_init(&vport_request
);
2282 vport_request
.cmd
= OVS_VPORT_CMD_SET
;
2283 vport_request
.dp_ifindex
= dpif
->dp_ifindex
;
2284 vport_request
.port_no
= vport
.port_no
;
2285 vport_request
.n_upcall_pids
= 1;
2286 vport_request
.upcall_pids
= &upcall_pid
;
2287 error
= dpif_netlink_vport_transact(&vport_request
, NULL
, NULL
);
2289 VLOG_WARN_RL(&error_rl
,
2290 "%s: failed to set upcall pid on port: %s",
2291 dpif_name(&dpif
->dpif
), ovs_strerror(error
));
2293 if (error
!= ENODEV
&& error
!= ENOENT
) {
2296 /* The vport isn't really there, even though the dump says
2297 * it is. Probably we just hit a race after a port
2304 if (port_no
< keep_channels_nbits
) {
2305 bitmap_set1(keep_channels
, port_no
);
2310 vport_del_channels(dpif
, vport
.port_no
);
2312 nl_dump_done(&dump
);
2313 ofpbuf_uninit(&buf
);
2315 /* Discard any saved channels that we didn't reuse. */
2316 for (i
= 0; i
< keep_channels_nbits
; i
++) {
2317 if (!bitmap_is_set(keep_channels
, i
)) {
2318 vport_del_channels(dpif
, u32_to_odp(i
));
2321 free(keep_channels
);
2327 dpif_netlink_recv_set__(struct dpif_netlink
*dpif
, bool enable
)
2328 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2330 if ((dpif
->handlers
!= NULL
) == enable
) {
2332 } else if (!enable
) {
2333 destroy_all_channels(dpif
);
2336 return dpif_netlink_refresh_channels(dpif
, 1);
2341 dpif_netlink_recv_set(struct dpif
*dpif_
, bool enable
)
2343 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2346 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2347 error
= dpif_netlink_recv_set__(dpif
, enable
);
2348 fat_rwlock_unlock(&dpif
->upcall_lock
);
2354 dpif_netlink_handlers_set(struct dpif
*dpif_
, uint32_t n_handlers
)
2356 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2360 /* Multiple upcall handlers will be supported once kernel datapath supports
2362 if (n_handlers
> 1) {
2367 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2368 if (dpif
->handlers
) {
2369 error
= dpif_netlink_refresh_channels(dpif
, n_handlers
);
2371 fat_rwlock_unlock(&dpif
->upcall_lock
);
2377 dpif_netlink_queue_to_priority(const struct dpif
*dpif OVS_UNUSED
,
2378 uint32_t queue_id
, uint32_t *priority
)
2380 if (queue_id
< 0xf000) {
2381 *priority
= TC_H_MAKE(1 << 16, queue_id
+ 1);
2389 parse_odp_packet(const struct dpif_netlink
*dpif
, struct ofpbuf
*buf
,
2390 struct dpif_upcall
*upcall
, int *dp_ifindex
)
2392 static const struct nl_policy ovs_packet_policy
[] = {
2393 /* Always present. */
2394 [OVS_PACKET_ATTR_PACKET
] = { .type
= NL_A_UNSPEC
,
2395 .min_len
= ETH_HEADER_LEN
},
2396 [OVS_PACKET_ATTR_KEY
] = { .type
= NL_A_NESTED
},
2398 /* OVS_PACKET_CMD_ACTION only. */
2399 [OVS_PACKET_ATTR_USERDATA
] = { .type
= NL_A_UNSPEC
, .optional
= true },
2400 [OVS_PACKET_ATTR_EGRESS_TUN_KEY
] = { .type
= NL_A_NESTED
, .optional
= true },
2401 [OVS_PACKET_ATTR_ACTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
2402 [OVS_PACKET_ATTR_MRU
] = { .type
= NL_A_U16
, .optional
= true }
2405 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
2406 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
2407 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
2408 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
2410 struct nlattr
*a
[ARRAY_SIZE(ovs_packet_policy
)];
2411 if (!nlmsg
|| !genl
|| !ovs_header
2412 || nlmsg
->nlmsg_type
!= ovs_packet_family
2413 || !nl_policy_parse(&b
, 0, ovs_packet_policy
, a
,
2414 ARRAY_SIZE(ovs_packet_policy
))) {
2418 int type
= (genl
->cmd
== OVS_PACKET_CMD_MISS
? DPIF_UC_MISS
2419 : genl
->cmd
== OVS_PACKET_CMD_ACTION
? DPIF_UC_ACTION
2425 /* (Re)set ALL fields of '*upcall' on successful return. */
2426 upcall
->type
= type
;
2427 upcall
->key
= CONST_CAST(struct nlattr
*,
2428 nl_attr_get(a
[OVS_PACKET_ATTR_KEY
]));
2429 upcall
->key_len
= nl_attr_get_size(a
[OVS_PACKET_ATTR_KEY
]);
2430 dpif_flow_hash(&dpif
->dpif
, upcall
->key
, upcall
->key_len
, &upcall
->ufid
);
2431 upcall
->userdata
= a
[OVS_PACKET_ATTR_USERDATA
];
2432 upcall
->out_tun_key
= a
[OVS_PACKET_ATTR_EGRESS_TUN_KEY
];
2433 upcall
->actions
= a
[OVS_PACKET_ATTR_ACTIONS
];
2434 upcall
->mru
= a
[OVS_PACKET_ATTR_MRU
];
2436 /* Allow overwriting the netlink attribute header without reallocating. */
2437 dp_packet_use_stub(&upcall
->packet
,
2438 CONST_CAST(struct nlattr
*,
2439 nl_attr_get(a
[OVS_PACKET_ATTR_PACKET
])) - 1,
2440 nl_attr_get_size(a
[OVS_PACKET_ATTR_PACKET
]) +
2441 sizeof(struct nlattr
));
2442 dp_packet_set_data(&upcall
->packet
,
2443 (char *)dp_packet_data(&upcall
->packet
) + sizeof(struct nlattr
));
2444 dp_packet_set_size(&upcall
->packet
, nl_attr_get_size(a
[OVS_PACKET_ATTR_PACKET
]));
2446 if (nl_attr_find__(upcall
->key
, upcall
->key_len
, OVS_KEY_ATTR_ETHERNET
)) {
2447 /* Ethernet frame */
2448 upcall
->packet
.packet_type
= htonl(PT_ETH
);
2450 /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
2451 ovs_be16 ethertype
= 0;
2452 const struct nlattr
*et_nla
= nl_attr_find__(upcall
->key
,
2454 OVS_KEY_ATTR_ETHERTYPE
);
2456 ethertype
= nl_attr_get_be16(et_nla
);
2458 upcall
->packet
.packet_type
= PACKET_TYPE_BE(OFPHTN_ETHERTYPE
,
2460 dp_packet_set_l3(&upcall
->packet
, dp_packet_data(&upcall
->packet
));
2463 *dp_ifindex
= ovs_header
->dp_ifindex
;
2469 #define PACKET_RECV_BATCH_SIZE 50
2471 dpif_netlink_recv_windows(struct dpif_netlink
*dpif
, uint32_t handler_id
,
2472 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2473 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2475 struct dpif_handler
*handler
;
2477 struct dpif_windows_vport_sock
*sock_pool
;
2480 if (!dpif
->handlers
) {
2484 /* Only one handler is supported currently. */
2485 if (handler_id
>= 1) {
2489 if (handler_id
>= dpif
->n_handlers
) {
2493 handler
= &dpif
->handlers
[handler_id
];
2494 sock_pool
= handler
->vport_sock_pool
;
2496 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
2501 if (++read_tries
> PACKET_RECV_BATCH_SIZE
) {
2505 error
= nl_sock_recv(sock_pool
[i
].nl_sock
, buf
, NULL
, false);
2506 if (error
== ENOBUFS
) {
2507 /* ENOBUFS typically means that we've received so many
2508 * packets that the buffer overflowed. Try again
2509 * immediately because there's almost certainly a packet
2510 * waiting for us. */
2511 /* XXX: report_loss(dpif, ch, idx, handler_id); */
2515 /* XXX: ch->last_poll = time_msec(); */
2517 if (error
== EAGAIN
) {
2523 error
= parse_odp_packet(dpif
, buf
, upcall
, &dp_ifindex
);
2524 if (!error
&& dp_ifindex
== dpif
->dp_ifindex
) {
2536 dpif_netlink_recv__(struct dpif_netlink
*dpif
, uint32_t handler_id
,
2537 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2538 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2540 struct dpif_handler
*handler
;
2543 if (!dpif
->handlers
|| handler_id
>= dpif
->n_handlers
) {
2547 handler
= &dpif
->handlers
[handler_id
];
2548 if (handler
->event_offset
>= handler
->n_events
) {
2551 handler
->event_offset
= handler
->n_events
= 0;
2554 retval
= epoll_wait(handler
->epoll_fd
, handler
->epoll_events
,
2555 dpif
->uc_array_size
, 0);
2556 } while (retval
< 0 && errno
== EINTR
);
2559 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 1);
2560 VLOG_WARN_RL(&rl
, "epoll_wait failed (%s)", ovs_strerror(errno
));
2561 } else if (retval
> 0) {
2562 handler
->n_events
= retval
;
2566 while (handler
->event_offset
< handler
->n_events
) {
2567 int idx
= handler
->epoll_events
[handler
->event_offset
].data
.u32
;
2568 struct dpif_channel
*ch
= &dpif
->channels
[idx
];
2570 handler
->event_offset
++;
2576 if (++read_tries
> 50) {
2580 error
= nl_sock_recv(ch
->sock
, buf
, NULL
, false);
2581 if (error
== ENOBUFS
) {
2582 /* ENOBUFS typically means that we've received so many
2583 * packets that the buffer overflowed. Try again
2584 * immediately because there's almost certainly a packet
2585 * waiting for us. */
2586 report_loss(dpif
, ch
, idx
, handler_id
);
2590 ch
->last_poll
= time_msec();
2592 if (error
== EAGAIN
) {
2598 error
= parse_odp_packet(dpif
, buf
, upcall
, &dp_ifindex
);
2599 if (!error
&& dp_ifindex
== dpif
->dp_ifindex
) {
2612 dpif_netlink_recv(struct dpif
*dpif_
, uint32_t handler_id
,
2613 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2615 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2618 fat_rwlock_rdlock(&dpif
->upcall_lock
);
2620 error
= dpif_netlink_recv_windows(dpif
, handler_id
, upcall
, buf
);
2622 error
= dpif_netlink_recv__(dpif
, handler_id
, upcall
, buf
);
2624 fat_rwlock_unlock(&dpif
->upcall_lock
);
2630 dpif_netlink_recv_wait__(struct dpif_netlink
*dpif
, uint32_t handler_id
)
2631 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2635 struct dpif_windows_vport_sock
*sock_pool
=
2636 dpif
->handlers
[handler_id
].vport_sock_pool
;
2638 /* Only one handler is supported currently. */
2639 if (handler_id
>= 1) {
2643 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
2644 nl_sock_wait(sock_pool
[i
].nl_sock
, POLLIN
);
2647 if (dpif
->handlers
&& handler_id
< dpif
->n_handlers
) {
2648 struct dpif_handler
*handler
= &dpif
->handlers
[handler_id
];
2650 poll_fd_wait(handler
->epoll_fd
, POLLIN
);
2656 dpif_netlink_recv_wait(struct dpif
*dpif_
, uint32_t handler_id
)
2658 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2660 fat_rwlock_rdlock(&dpif
->upcall_lock
);
2661 dpif_netlink_recv_wait__(dpif
, handler_id
);
2662 fat_rwlock_unlock(&dpif
->upcall_lock
);
2666 dpif_netlink_recv_purge__(struct dpif_netlink
*dpif
)
2667 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2669 if (dpif
->handlers
) {
2672 if (!dpif
->channels
[0].sock
) {
2675 for (i
= 0; i
< dpif
->uc_array_size
; i
++ ) {
2677 nl_sock_drain(dpif
->channels
[i
].sock
);
2683 dpif_netlink_recv_purge(struct dpif
*dpif_
)
2685 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2687 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2688 dpif_netlink_recv_purge__(dpif
);
2689 fat_rwlock_unlock(&dpif
->upcall_lock
);
2693 dpif_netlink_get_datapath_version(void)
2695 char *version_str
= NULL
;
2699 #define MAX_VERSION_STR_SIZE 80
2700 #define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
2703 f
= fopen(LINUX_DATAPATH_VERSION_FILE
, "r");
2706 char version
[MAX_VERSION_STR_SIZE
];
2708 if (fgets(version
, MAX_VERSION_STR_SIZE
, f
)) {
2709 newline
= strchr(version
, '\n');
2713 version_str
= xstrdup(version
);
2722 struct dpif_netlink_ct_dump_state
{
2723 struct ct_dpif_dump_state up
;
2724 struct nl_ct_dump_state
*nl_ct_dump
;
2728 dpif_netlink_ct_dump_start(struct dpif
*dpif OVS_UNUSED
,
2729 struct ct_dpif_dump_state
**dump_
,
2730 const uint16_t *zone
, int *ptot_bkts
)
2732 struct dpif_netlink_ct_dump_state
*dump
;
2735 dump
= xzalloc(sizeof *dump
);
2736 err
= nl_ct_dump_start(&dump
->nl_ct_dump
, zone
, ptot_bkts
);
2748 dpif_netlink_ct_dump_next(struct dpif
*dpif OVS_UNUSED
,
2749 struct ct_dpif_dump_state
*dump_
,
2750 struct ct_dpif_entry
*entry
)
2752 struct dpif_netlink_ct_dump_state
*dump
;
2754 INIT_CONTAINER(dump
, dump_
, up
);
2756 return nl_ct_dump_next(dump
->nl_ct_dump
, entry
);
2760 dpif_netlink_ct_dump_done(struct dpif
*dpif OVS_UNUSED
,
2761 struct ct_dpif_dump_state
*dump_
)
2763 struct dpif_netlink_ct_dump_state
*dump
;
2766 INIT_CONTAINER(dump
, dump_
, up
);
2768 err
= nl_ct_dump_done(dump
->nl_ct_dump
);
2774 dpif_netlink_ct_flush(struct dpif
*dpif OVS_UNUSED
, const uint16_t *zone
,
2775 const struct ct_dpif_tuple
*tuple
)
2778 return nl_ct_flush_tuple(tuple
, zone
? *zone
: 0);
2780 return nl_ct_flush_zone(*zone
);
2782 return nl_ct_flush();
2787 dpif_netlink_ct_set_limits(struct dpif
*dpif OVS_UNUSED
,
2788 const uint32_t *default_limits
,
2789 const struct ovs_list
*zone_limits
)
2791 struct ovs_zone_limit req_zone_limit
;
2793 if (ovs_ct_limit_family
< 0) {
2797 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
2798 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
2799 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_SET
,
2800 OVS_CT_LIMIT_VERSION
);
2802 struct ovs_header
*ovs_header
;
2803 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
2804 ovs_header
->dp_ifindex
= 0;
2807 opt_offset
= nl_msg_start_nested(request
, OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
2808 if (default_limits
) {
2809 req_zone_limit
.zone_id
= OVS_ZONE_LIMIT_DEFAULT_ZONE
;
2810 req_zone_limit
.limit
= *default_limits
;
2811 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2814 if (!ovs_list_is_empty(zone_limits
)) {
2815 struct ct_dpif_zone_limit
*zone_limit
;
2817 LIST_FOR_EACH (zone_limit
, node
, zone_limits
) {
2818 req_zone_limit
.zone_id
= zone_limit
->zone
;
2819 req_zone_limit
.limit
= zone_limit
->limit
;
2820 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2823 nl_msg_end_nested(request
, opt_offset
);
2825 int err
= nl_transact(NETLINK_GENERIC
, request
, NULL
);
2826 ofpbuf_uninit(request
);
2831 dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf
*buf
,
2832 uint32_t *default_limit
,
2833 struct ovs_list
*zone_limits
)
2835 static const struct nl_policy ovs_ct_limit_policy
[] = {
2836 [OVS_CT_LIMIT_ATTR_ZONE_LIMIT
] = { .type
= NL_A_NESTED
,
2840 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
2841 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
2842 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
2843 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
2845 struct nlattr
*attr
[ARRAY_SIZE(ovs_ct_limit_policy
)];
2847 if (!nlmsg
|| !genl
|| !ovs_header
2848 || nlmsg
->nlmsg_type
!= ovs_ct_limit_family
2849 || !nl_policy_parse(&b
, 0, ovs_ct_limit_policy
, attr
,
2850 ARRAY_SIZE(ovs_ct_limit_policy
))) {
2855 if (!attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]) {
2859 int rem
= NLA_ALIGN(
2860 nl_attr_get_size(attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]));
2861 const struct ovs_zone_limit
*zone_limit
=
2862 nl_attr_get(attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]);
2864 while (rem
>= sizeof *zone_limit
) {
2865 if (zone_limit
->zone_id
== OVS_ZONE_LIMIT_DEFAULT_ZONE
) {
2866 *default_limit
= zone_limit
->limit
;
2867 } else if (zone_limit
->zone_id
< OVS_ZONE_LIMIT_DEFAULT_ZONE
||
2868 zone_limit
->zone_id
> UINT16_MAX
) {
2870 ct_dpif_push_zone_limit(zone_limits
, zone_limit
->zone_id
,
2871 zone_limit
->limit
, zone_limit
->count
);
2873 rem
-= NLA_ALIGN(sizeof *zone_limit
);
2874 zone_limit
= ALIGNED_CAST(struct ovs_zone_limit
*,
2875 (unsigned char *) zone_limit
+ NLA_ALIGN(sizeof *zone_limit
));
2881 dpif_netlink_ct_get_limits(struct dpif
*dpif OVS_UNUSED
,
2882 uint32_t *default_limit
,
2883 const struct ovs_list
*zone_limits_request
,
2884 struct ovs_list
*zone_limits_reply
)
2886 if (ovs_ct_limit_family
< 0) {
2890 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
2891 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
2892 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_GET
,
2893 OVS_CT_LIMIT_VERSION
);
2895 struct ovs_header
*ovs_header
;
2896 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
2897 ovs_header
->dp_ifindex
= 0;
2899 if (!ovs_list_is_empty(zone_limits_request
)) {
2900 size_t opt_offset
= nl_msg_start_nested(request
,
2901 OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
2903 struct ovs_zone_limit req_zone_limit
;
2904 req_zone_limit
.zone_id
= OVS_ZONE_LIMIT_DEFAULT_ZONE
;
2905 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2907 struct ct_dpif_zone_limit
*zone_limit
;
2908 LIST_FOR_EACH (zone_limit
, node
, zone_limits_request
) {
2909 req_zone_limit
.zone_id
= zone_limit
->zone
;
2910 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2913 nl_msg_end_nested(request
, opt_offset
);
2916 struct ofpbuf
*reply
;
2917 int err
= nl_transact(NETLINK_GENERIC
, request
, &reply
);
2922 err
= dpif_netlink_zone_limits_from_ofpbuf(reply
, default_limit
,
2926 ofpbuf_uninit(request
);
2927 ofpbuf_uninit(reply
);
2932 dpif_netlink_ct_del_limits(struct dpif
*dpif OVS_UNUSED
,
2933 const struct ovs_list
*zone_limits
)
2935 if (ovs_ct_limit_family
< 0) {
2939 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
2940 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
2941 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_DEL
,
2942 OVS_CT_LIMIT_VERSION
);
2944 struct ovs_header
*ovs_header
;
2945 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
2946 ovs_header
->dp_ifindex
= 0;
2948 if (!ovs_list_is_empty(zone_limits
)) {
2950 nl_msg_start_nested(request
, OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
2952 struct ct_dpif_zone_limit
*zone_limit
;
2953 LIST_FOR_EACH (zone_limit
, node
, zone_limits
) {
2954 struct ovs_zone_limit req_zone_limit
;
2955 req_zone_limit
.zone_id
= zone_limit
->zone
;
2956 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2958 nl_msg_end_nested(request
, opt_offset
);
2961 int err
= nl_transact(NETLINK_GENERIC
, request
, NULL
);
2963 ofpbuf_uninit(request
);
2969 /* Set of supported meter flags */
2970 #define DP_SUPPORTED_METER_FLAGS_MASK \
2971 (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)
2973 /* Meter support was introduced in Linux 4.15. In some versions of
2974 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
2975 * when the meter was created, so all meters essentially had an id of
2976 * zero. Check for that condition and disable meters on those kernels. */
2977 static bool probe_broken_meters(struct dpif
*);
2980 dpif_netlink_meter_init(struct dpif_netlink
*dpif
, struct ofpbuf
*buf
,
2981 void *stub
, size_t size
, uint32_t command
)
2983 ofpbuf_use_stub(buf
, stub
, size
);
2985 nl_msg_put_genlmsghdr(buf
, 0, ovs_meter_family
, NLM_F_REQUEST
| NLM_F_ECHO
,
2986 command
, OVS_METER_VERSION
);
2988 struct ovs_header
*ovs_header
;
2989 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
2990 ovs_header
->dp_ifindex
= dpif
->dp_ifindex
;
2993 /* Execute meter 'request' in the kernel datapath. If the command
2994 * fails, returns a positive errno value. Otherwise, stores the reply
2995 * in '*replyp', parses the policy according to 'reply_policy' into the
2996 * array of Netlink attribute in 'a', and returns 0. On success, the
2997 * caller is responsible for calling ofpbuf_delete() on '*replyp'
2998 * ('replyp' will contain pointers into 'a'). */
3000 dpif_netlink_meter_transact(struct ofpbuf
*request
, struct ofpbuf
**replyp
,
3001 const struct nl_policy
*reply_policy
,
3002 struct nlattr
**a
, size_t size_a
)
3004 int error
= nl_transact(NETLINK_GENERIC
, request
, replyp
);
3005 ofpbuf_uninit(request
);
3011 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(*replyp
, sizeof *nlmsg
);
3012 struct genlmsghdr
*genl
= ofpbuf_try_pull(*replyp
, sizeof *genl
);
3013 struct ovs_header
*ovs_header
= ofpbuf_try_pull(*replyp
,
3014 sizeof *ovs_header
);
3015 if (!nlmsg
|| !genl
|| !ovs_header
3016 || nlmsg
->nlmsg_type
!= ovs_meter_family
3017 || !nl_policy_parse(*replyp
, 0, reply_policy
, a
, size_a
)) {
3018 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3020 "Kernel module response to meter tranaction is invalid");
3027 dpif_netlink_meter_get_features(const struct dpif
*dpif_
,
3028 struct ofputil_meter_features
*features
)
3030 if (probe_broken_meters(CONST_CAST(struct dpif
*, dpif_
))) {
3035 struct ofpbuf buf
, *msg
;
3036 uint64_t stub
[1024 / 8];
3038 static const struct nl_policy ovs_meter_features_policy
[] = {
3039 [OVS_METER_ATTR_MAX_METERS
] = { .type
= NL_A_U32
},
3040 [OVS_METER_ATTR_MAX_BANDS
] = { .type
= NL_A_U32
},
3041 [OVS_METER_ATTR_BANDS
] = { .type
= NL_A_NESTED
, .optional
= true },
3043 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_features_policy
)];
3045 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3046 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
,
3047 OVS_METER_CMD_FEATURES
);
3048 if (dpif_netlink_meter_transact(&buf
, &msg
, ovs_meter_features_policy
, a
,
3049 ARRAY_SIZE(ovs_meter_features_policy
))) {
3050 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3052 "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
3056 features
->max_meters
= nl_attr_get_u32(a
[OVS_METER_ATTR_MAX_METERS
]);
3057 features
->max_bands
= nl_attr_get_u32(a
[OVS_METER_ATTR_MAX_BANDS
]);
3059 /* Bands is a nested attribute of zero or more nested
3060 * band attributes. */
3061 if (a
[OVS_METER_ATTR_BANDS
]) {
3062 const struct nlattr
*nla
;
3065 NL_NESTED_FOR_EACH (nla
, left
, a
[OVS_METER_ATTR_BANDS
]) {
3066 const struct nlattr
*band_nla
;
3069 NL_NESTED_FOR_EACH (band_nla
, band_left
, nla
) {
3070 if (nl_attr_type(band_nla
) == OVS_BAND_ATTR_TYPE
) {
3071 if (nl_attr_get_size(band_nla
) == sizeof(uint32_t)) {
3072 switch (nl_attr_get_u32(band_nla
)) {
3073 case OVS_METER_BAND_TYPE_DROP
:
3074 features
->band_types
|= 1 << OFPMBT13_DROP
;
3082 features
->capabilities
= DP_SUPPORTED_METER_FLAGS_MASK
;
3088 dpif_netlink_meter_set__(struct dpif
*dpif_
, ofproto_meter_id meter_id
,
3089 struct ofputil_meter_config
*config
)
3091 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3092 struct ofpbuf buf
, *msg
;
3093 uint64_t stub
[1024 / 8];
3095 static const struct nl_policy ovs_meter_set_response_policy
[] = {
3096 [OVS_METER_ATTR_ID
] = { .type
= NL_A_U32
},
3098 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_set_response_policy
)];
3100 if (config
->flags
& ~DP_SUPPORTED_METER_FLAGS_MASK
) {
3101 return EBADF
; /* Unsupported flags set */
3104 for (size_t i
= 0; i
< config
->n_bands
; i
++) {
3105 switch (config
->bands
[i
].type
) {
3109 return ENODEV
; /* Unsupported band type */
3113 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
, OVS_METER_CMD_SET
);
3115 nl_msg_put_u32(&buf
, OVS_METER_ATTR_ID
, meter_id
.uint32
);
3117 if (config
->flags
& OFPMF13_KBPS
) {
3118 nl_msg_put_flag(&buf
, OVS_METER_ATTR_KBPS
);
3121 size_t bands_offset
= nl_msg_start_nested(&buf
, OVS_METER_ATTR_BANDS
);
3123 for (size_t i
= 0; i
< config
->n_bands
; ++i
) {
3124 struct ofputil_meter_band
* band
= &config
->bands
[i
];
3127 size_t band_offset
= nl_msg_start_nested(&buf
, OVS_BAND_ATTR_UNSPEC
);
3129 switch (band
->type
) {
3131 band_type
= OVS_METER_BAND_TYPE_DROP
;
3134 band_type
= OVS_METER_BAND_TYPE_UNSPEC
;
3136 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_TYPE
, band_type
);
3137 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_RATE
, band
->rate
);
3138 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_BURST
,
3139 config
->flags
& OFPMF13_BURST
?
3140 band
->burst_size
: band
->rate
);
3141 nl_msg_end_nested(&buf
, band_offset
);
3143 nl_msg_end_nested(&buf
, bands_offset
);
3145 int error
= dpif_netlink_meter_transact(&buf
, &msg
,
3146 ovs_meter_set_response_policy
, a
,
3147 ARRAY_SIZE(ovs_meter_set_response_policy
));
3149 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3151 "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
3155 if (nl_attr_get_u32(a
[OVS_METER_ATTR_ID
]) != meter_id
.uint32
) {
3156 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3158 "Kernel returned a different meter id than requested");
3165 dpif_netlink_meter_set(struct dpif
*dpif_
, ofproto_meter_id meter_id
,
3166 struct ofputil_meter_config
*config
)
3168 if (probe_broken_meters(dpif_
)) {
3172 return dpif_netlink_meter_set__(dpif_
, meter_id
, config
);
3175 /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
3176 * stored in 'stats', if it is not null. If 'command' is
3177 * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
3178 * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
3179 * simply retrieved. */
3181 dpif_netlink_meter_get_stats(const struct dpif
*dpif_
,
3182 ofproto_meter_id meter_id
,
3183 struct ofputil_meter_stats
*stats
,
3185 enum ovs_meter_cmd command
)
3187 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3188 struct ofpbuf buf
, *msg
;
3189 uint64_t stub
[1024 / 8];
3191 static const struct nl_policy ovs_meter_stats_policy
[] = {
3192 [OVS_METER_ATTR_ID
] = { .type
= NL_A_U32
, .optional
= true},
3193 [OVS_METER_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_flow_stats
),
3195 [OVS_METER_ATTR_BANDS
] = { .type
= NL_A_NESTED
, .optional
= true },
3197 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_stats_policy
)];
3199 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
, command
);
3201 nl_msg_put_u32(&buf
, OVS_METER_ATTR_ID
, meter_id
.uint32
);
3203 int error
= dpif_netlink_meter_transact(&buf
, &msg
,
3204 ovs_meter_stats_policy
, a
,
3205 ARRAY_SIZE(ovs_meter_stats_policy
));
3207 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3208 VLOG_INFO_RL(&rl
, "dpif_netlink_meter_transact %s failed",
3209 command
== OVS_METER_CMD_GET
? "get" : "del");
3214 && a
[OVS_METER_ATTR_ID
]
3215 && a
[OVS_METER_ATTR_STATS
]
3216 && nl_attr_get_u32(a
[OVS_METER_ATTR_ID
]) == meter_id
.uint32
) {
3218 const struct ovs_flow_stats
*stat
;
3219 const struct nlattr
*nla
;
3222 stat
= nl_attr_get(a
[OVS_METER_ATTR_STATS
]);
3223 stats
->packet_in_count
= get_32aligned_u64(&stat
->n_packets
);
3224 stats
->byte_in_count
= get_32aligned_u64(&stat
->n_bytes
);
3226 if (a
[OVS_METER_ATTR_BANDS
]) {
3228 NL_NESTED_FOR_EACH (nla
, left
, a
[OVS_METER_ATTR_BANDS
]) {
3229 const struct nlattr
*band_nla
;
3230 band_nla
= nl_attr_find_nested(nla
, OVS_BAND_ATTR_STATS
);
3231 if (band_nla
&& nl_attr_get_size(band_nla
) \
3232 == sizeof(struct ovs_flow_stats
)) {
3233 stat
= nl_attr_get(band_nla
);
3235 if (n_bands
< max_bands
) {
3236 stats
->bands
[n_bands
].packet_count
3237 = get_32aligned_u64(&stat
->n_packets
);
3238 stats
->bands
[n_bands
].byte_count
3239 = get_32aligned_u64(&stat
->n_bytes
);
3243 stats
->bands
[n_bands
].packet_count
= 0;
3244 stats
->bands
[n_bands
].byte_count
= 0;
3248 stats
->n_bands
= n_bands
;
3250 /* For a non-existent meter, return 0 stats. */
3260 dpif_netlink_meter_get(const struct dpif
*dpif
, ofproto_meter_id meter_id
,
3261 struct ofputil_meter_stats
*stats
, uint16_t max_bands
)
3263 return dpif_netlink_meter_get_stats(dpif
, meter_id
, stats
, max_bands
,
3268 dpif_netlink_meter_del(struct dpif
*dpif
, ofproto_meter_id meter_id
,
3269 struct ofputil_meter_stats
*stats
, uint16_t max_bands
)
3271 return dpif_netlink_meter_get_stats(dpif
, meter_id
, stats
, max_bands
,
3276 probe_broken_meters__(struct dpif
*dpif
)
3278 /* This test is destructive if a probe occurs while ovs-vswitchd is
3279 * running (e.g., an ovs-dpctl meter command is called), so choose a
3280 * random high meter id to make this less likely to occur. */
3281 ofproto_meter_id id1
= { 54545401 };
3282 ofproto_meter_id id2
= { 54545402 };
3283 struct ofputil_meter_band band
= {OFPMBT13_DROP
, 0, 1, 0};
3284 struct ofputil_meter_config config1
= { 1, OFPMF13_KBPS
, 1, &band
};
3285 struct ofputil_meter_config config2
= { 2, OFPMF13_KBPS
, 1, &band
};
3287 /* Try adding two meters and make sure that they both come back with
3288 * the proper meter id. Use the "__" version so that we don't cause
3289 * a recurve deadlock. */
3290 dpif_netlink_meter_set__(dpif
, id1
, &config1
);
3291 dpif_netlink_meter_set__(dpif
, id2
, &config2
);
3293 if (dpif_netlink_meter_get(dpif
, id1
, NULL
, 0)
3294 || dpif_netlink_meter_get(dpif
, id2
, NULL
, 0)) {
3295 VLOG_INFO("The kernel module has a broken meter implementation.");
3299 dpif_netlink_meter_del(dpif
, id1
, NULL
, 0);
3300 dpif_netlink_meter_del(dpif
, id2
, NULL
, 0);
3306 probe_broken_meters(struct dpif
*dpif
)
3308 /* This is a once-only test because currently OVS only has at most a single
3309 * Netlink capable datapath on any given platform. */
3310 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
3312 static bool broken_meters
= false;
3313 if (ovsthread_once_start(&once
)) {
3314 broken_meters
= probe_broken_meters__(dpif
);
3315 ovsthread_once_done(&once
);
3317 return broken_meters
;
3320 const struct dpif_class dpif_netlink_class
= {
3323 dpif_netlink_enumerate
,
3327 dpif_netlink_destroy
,
3330 dpif_netlink_get_stats
,
3331 dpif_netlink_port_add
,
3332 dpif_netlink_port_del
,
3333 NULL
, /* port_set_config */
3334 dpif_netlink_port_query_by_number
,
3335 dpif_netlink_port_query_by_name
,
3336 dpif_netlink_port_get_pid
,
3337 dpif_netlink_port_dump_start
,
3338 dpif_netlink_port_dump_next
,
3339 dpif_netlink_port_dump_done
,
3340 dpif_netlink_port_poll
,
3341 dpif_netlink_port_poll_wait
,
3342 dpif_netlink_flow_flush
,
3343 dpif_netlink_flow_dump_create
,
3344 dpif_netlink_flow_dump_destroy
,
3345 dpif_netlink_flow_dump_thread_create
,
3346 dpif_netlink_flow_dump_thread_destroy
,
3347 dpif_netlink_flow_dump_next
,
3348 dpif_netlink_operate
,
3349 dpif_netlink_recv_set
,
3350 dpif_netlink_handlers_set
,
3351 NULL
, /* set_config */
3352 dpif_netlink_queue_to_priority
,
3354 dpif_netlink_recv_wait
,
3355 dpif_netlink_recv_purge
,
3356 NULL
, /* register_dp_purge_cb */
3357 NULL
, /* register_upcall_cb */
3358 NULL
, /* enable_upcall */
3359 NULL
, /* disable_upcall */
3360 dpif_netlink_get_datapath_version
, /* get_datapath_version */
3361 dpif_netlink_ct_dump_start
,
3362 dpif_netlink_ct_dump_next
,
3363 dpif_netlink_ct_dump_done
,
3364 dpif_netlink_ct_flush
,
3365 NULL
, /* ct_set_maxconns */
3366 NULL
, /* ct_get_maxconns */
3367 NULL
, /* ct_get_nconns */
3368 dpif_netlink_ct_set_limits
,
3369 dpif_netlink_ct_get_limits
,
3370 dpif_netlink_ct_del_limits
,
3371 dpif_netlink_meter_get_features
,
3372 dpif_netlink_meter_set
,
3373 dpif_netlink_meter_get
,
3374 dpif_netlink_meter_del
,
3378 dpif_netlink_init(void)
3380 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
3383 if (ovsthread_once_start(&once
)) {
3384 error
= nl_lookup_genl_family(OVS_DATAPATH_FAMILY
,
3385 &ovs_datapath_family
);
3387 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3388 "The Open vSwitch kernel module is probably not loaded.",
3389 OVS_DATAPATH_FAMILY
);
3392 error
= nl_lookup_genl_family(OVS_VPORT_FAMILY
, &ovs_vport_family
);
3395 error
= nl_lookup_genl_family(OVS_FLOW_FAMILY
, &ovs_flow_family
);
3398 error
= nl_lookup_genl_family(OVS_PACKET_FAMILY
,
3399 &ovs_packet_family
);
3402 error
= nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY
, OVS_VPORT_MCGROUP
,
3403 &ovs_vport_mcgroup
);
3406 if (nl_lookup_genl_family(OVS_METER_FAMILY
, &ovs_meter_family
)) {
3407 VLOG_INFO("The kernel module does not support meters.");
3410 if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY
,
3411 &ovs_ct_limit_family
) < 0) {
3412 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3413 "Please update the Open vSwitch kernel module to enable "
3414 "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY
);
3417 ovs_tunnels_out_of_tree
= dpif_netlink_rtnl_probe_oot_tunnels();
3419 ovsthread_once_done(&once
);
3426 dpif_netlink_is_internal_device(const char *name
)
3428 struct dpif_netlink_vport reply
;
3432 error
= dpif_netlink_vport_get(name
, &reply
, &buf
);
3435 } else if (error
!= ENODEV
&& error
!= ENOENT
) {
3436 VLOG_WARN_RL(&error_rl
, "%s: vport query failed (%s)",
3437 name
, ovs_strerror(error
));
3440 return reply
.type
== OVS_VPORT_TYPE_INTERNAL
;
3443 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
3444 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
3445 * positive errno value.
3447 * 'vport' will contain pointers into 'buf', so the caller should not free
3448 * 'buf' while 'vport' is still in use. */
3450 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport
*vport
,
3451 const struct ofpbuf
*buf
)
3453 static const struct nl_policy ovs_vport_policy
[] = {
3454 [OVS_VPORT_ATTR_PORT_NO
] = { .type
= NL_A_U32
},
3455 [OVS_VPORT_ATTR_TYPE
] = { .type
= NL_A_U32
},
3456 [OVS_VPORT_ATTR_NAME
] = { .type
= NL_A_STRING
, .max_len
= IFNAMSIZ
},
3457 [OVS_VPORT_ATTR_UPCALL_PID
] = { .type
= NL_A_UNSPEC
},
3458 [OVS_VPORT_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_vport_stats
),
3460 [OVS_VPORT_ATTR_OPTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
3461 [OVS_VPORT_ATTR_NETNSID
] = { .type
= NL_A_U32
, .optional
= true },
3464 dpif_netlink_vport_init(vport
);
3466 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
3467 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
3468 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
3469 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
3471 struct nlattr
*a
[ARRAY_SIZE(ovs_vport_policy
)];
3472 if (!nlmsg
|| !genl
|| !ovs_header
3473 || nlmsg
->nlmsg_type
!= ovs_vport_family
3474 || !nl_policy_parse(&b
, 0, ovs_vport_policy
, a
,
3475 ARRAY_SIZE(ovs_vport_policy
))) {
3479 vport
->cmd
= genl
->cmd
;
3480 vport
->dp_ifindex
= ovs_header
->dp_ifindex
;
3481 vport
->port_no
= nl_attr_get_odp_port(a
[OVS_VPORT_ATTR_PORT_NO
]);
3482 vport
->type
= nl_attr_get_u32(a
[OVS_VPORT_ATTR_TYPE
]);
3483 vport
->name
= nl_attr_get_string(a
[OVS_VPORT_ATTR_NAME
]);
3484 if (a
[OVS_VPORT_ATTR_UPCALL_PID
]) {
3485 vport
->n_upcall_pids
= nl_attr_get_size(a
[OVS_VPORT_ATTR_UPCALL_PID
])
3486 / (sizeof *vport
->upcall_pids
);
3487 vport
->upcall_pids
= nl_attr_get(a
[OVS_VPORT_ATTR_UPCALL_PID
]);
3490 if (a
[OVS_VPORT_ATTR_STATS
]) {
3491 vport
->stats
= nl_attr_get(a
[OVS_VPORT_ATTR_STATS
]);
3493 if (a
[OVS_VPORT_ATTR_OPTIONS
]) {
3494 vport
->options
= nl_attr_get(a
[OVS_VPORT_ATTR_OPTIONS
]);
3495 vport
->options_len
= nl_attr_get_size(a
[OVS_VPORT_ATTR_OPTIONS
]);
3497 if (a
[OVS_VPORT_ATTR_NETNSID
]) {
3498 netnsid_set(&vport
->netnsid
,
3499 nl_attr_get_u32(a
[OVS_VPORT_ATTR_NETNSID
]));
3501 netnsid_set_local(&vport
->netnsid
);
3506 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
3507 * followed by Netlink attributes corresponding to 'vport'. */
3509 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport
*vport
,
3512 struct ovs_header
*ovs_header
;
3514 nl_msg_put_genlmsghdr(buf
, 0, ovs_vport_family
, NLM_F_REQUEST
| NLM_F_ECHO
,
3515 vport
->cmd
, OVS_VPORT_VERSION
);
3517 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
3518 ovs_header
->dp_ifindex
= vport
->dp_ifindex
;
3520 if (vport
->port_no
!= ODPP_NONE
) {
3521 nl_msg_put_odp_port(buf
, OVS_VPORT_ATTR_PORT_NO
, vport
->port_no
);
3524 if (vport
->type
!= OVS_VPORT_TYPE_UNSPEC
) {
3525 nl_msg_put_u32(buf
, OVS_VPORT_ATTR_TYPE
, vport
->type
);
3529 nl_msg_put_string(buf
, OVS_VPORT_ATTR_NAME
, vport
->name
);
3532 if (vport
->upcall_pids
) {
3533 nl_msg_put_unspec(buf
, OVS_VPORT_ATTR_UPCALL_PID
,
3535 vport
->n_upcall_pids
* sizeof *vport
->upcall_pids
);
3539 nl_msg_put_unspec(buf
, OVS_VPORT_ATTR_STATS
,
3540 vport
->stats
, sizeof *vport
->stats
);
3543 if (vport
->options
) {
3544 nl_msg_put_nested(buf
, OVS_VPORT_ATTR_OPTIONS
,
3545 vport
->options
, vport
->options_len
);
3549 /* Clears 'vport' to "empty" values. */
3551 dpif_netlink_vport_init(struct dpif_netlink_vport
*vport
)
3553 memset(vport
, 0, sizeof *vport
);
3554 vport
->port_no
= ODPP_NONE
;
3557 /* Executes 'request' in the kernel datapath. If the command fails, returns a
3558 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
3559 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
3560 * result of the command is expected to be an ovs_vport also, which is decoded
3561 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
3562 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
3564 dpif_netlink_vport_transact(const struct dpif_netlink_vport
*request
,
3565 struct dpif_netlink_vport
*reply
,
3566 struct ofpbuf
**bufp
)
3568 struct ofpbuf
*request_buf
;
3571 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
3573 error
= dpif_netlink_init();
3577 dpif_netlink_vport_init(reply
);
3582 request_buf
= ofpbuf_new(1024);
3583 dpif_netlink_vport_to_ofpbuf(request
, request_buf
);
3584 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
3585 ofpbuf_delete(request_buf
);
3589 error
= dpif_netlink_vport_from_ofpbuf(reply
, *bufp
);
3592 dpif_netlink_vport_init(reply
);
3593 ofpbuf_delete(*bufp
);
3600 /* Obtains information about the kernel vport named 'name' and stores it into
3601 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
3602 * longer needed ('reply' will contain pointers into '*bufp'). */
3604 dpif_netlink_vport_get(const char *name
, struct dpif_netlink_vport
*reply
,
3605 struct ofpbuf
**bufp
)
3607 struct dpif_netlink_vport request
;
3609 dpif_netlink_vport_init(&request
);
3610 request
.cmd
= OVS_VPORT_CMD_GET
;
3611 request
.name
= name
;
3613 return dpif_netlink_vport_transact(&request
, reply
, bufp
);
3616 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
3617 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
3618 * positive errno value.
3620 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
3621 * while 'dp' is still in use. */
3623 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp
*dp
, const struct ofpbuf
*buf
)
3625 static const struct nl_policy ovs_datapath_policy
[] = {
3626 [OVS_DP_ATTR_NAME
] = { .type
= NL_A_STRING
, .max_len
= IFNAMSIZ
},
3627 [OVS_DP_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_dp_stats
),
3629 [OVS_DP_ATTR_MEGAFLOW_STATS
] = {
3630 NL_POLICY_FOR(struct ovs_dp_megaflow_stats
),
3634 dpif_netlink_dp_init(dp
);
3636 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
3637 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
3638 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
3639 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
3641 struct nlattr
*a
[ARRAY_SIZE(ovs_datapath_policy
)];
3642 if (!nlmsg
|| !genl
|| !ovs_header
3643 || nlmsg
->nlmsg_type
!= ovs_datapath_family
3644 || !nl_policy_parse(&b
, 0, ovs_datapath_policy
, a
,
3645 ARRAY_SIZE(ovs_datapath_policy
))) {
3649 dp
->cmd
= genl
->cmd
;
3650 dp
->dp_ifindex
= ovs_header
->dp_ifindex
;
3651 dp
->name
= nl_attr_get_string(a
[OVS_DP_ATTR_NAME
]);
3652 if (a
[OVS_DP_ATTR_STATS
]) {
3653 dp
->stats
= nl_attr_get(a
[OVS_DP_ATTR_STATS
]);
3656 if (a
[OVS_DP_ATTR_MEGAFLOW_STATS
]) {
3657 dp
->megaflow_stats
= nl_attr_get(a
[OVS_DP_ATTR_MEGAFLOW_STATS
]);
3663 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
3665 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp
*dp
, struct ofpbuf
*buf
)
3667 struct ovs_header
*ovs_header
;
3669 nl_msg_put_genlmsghdr(buf
, 0, ovs_datapath_family
,
3670 NLM_F_REQUEST
| NLM_F_ECHO
, dp
->cmd
,
3671 OVS_DATAPATH_VERSION
);
3673 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
3674 ovs_header
->dp_ifindex
= dp
->dp_ifindex
;
3677 nl_msg_put_string(buf
, OVS_DP_ATTR_NAME
, dp
->name
);
3680 if (dp
->upcall_pid
) {
3681 nl_msg_put_u32(buf
, OVS_DP_ATTR_UPCALL_PID
, *dp
->upcall_pid
);
3684 if (dp
->user_features
) {
3685 nl_msg_put_u32(buf
, OVS_DP_ATTR_USER_FEATURES
, dp
->user_features
);
3688 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
3691 /* Clears 'dp' to "empty" values. */
3693 dpif_netlink_dp_init(struct dpif_netlink_dp
*dp
)
3695 memset(dp
, 0, sizeof *dp
);
3699 dpif_netlink_dp_dump_start(struct nl_dump
*dump
)
3701 struct dpif_netlink_dp request
;
3704 dpif_netlink_dp_init(&request
);
3705 request
.cmd
= OVS_DP_CMD_GET
;
3707 buf
= ofpbuf_new(1024);
3708 dpif_netlink_dp_to_ofpbuf(&request
, buf
);
3709 nl_dump_start(dump
, NETLINK_GENERIC
, buf
);
3713 /* Executes 'request' in the kernel datapath. If the command fails, returns a
3714 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
3715 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
3716 * result of the command is expected to be of the same form, which is decoded
3717 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
3718 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
3720 dpif_netlink_dp_transact(const struct dpif_netlink_dp
*request
,
3721 struct dpif_netlink_dp
*reply
, struct ofpbuf
**bufp
)
3723 struct ofpbuf
*request_buf
;
3726 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
3728 request_buf
= ofpbuf_new(1024);
3729 dpif_netlink_dp_to_ofpbuf(request
, request_buf
);
3730 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
3731 ofpbuf_delete(request_buf
);
3734 dpif_netlink_dp_init(reply
);
3736 error
= dpif_netlink_dp_from_ofpbuf(reply
, *bufp
);
3739 ofpbuf_delete(*bufp
);
3746 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
3747 * The caller must free '*bufp' when the reply is no longer needed ('reply'
3748 * will contain pointers into '*bufp'). */
3750 dpif_netlink_dp_get(const struct dpif
*dpif_
, struct dpif_netlink_dp
*reply
,
3751 struct ofpbuf
**bufp
)
3753 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3754 struct dpif_netlink_dp request
;
3756 dpif_netlink_dp_init(&request
);
3757 request
.cmd
= OVS_DP_CMD_GET
;
3758 request
.dp_ifindex
= dpif
->dp_ifindex
;
3760 return dpif_netlink_dp_transact(&request
, reply
, bufp
);
3763 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
3764 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
3765 * positive errno value.
3767 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
3768 * while 'flow' is still in use. */
3770 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow
*flow
,
3771 const struct ofpbuf
*buf
)
3773 static const struct nl_policy ovs_flow_policy
[__OVS_FLOW_ATTR_MAX
] = {
3774 [OVS_FLOW_ATTR_KEY
] = { .type
= NL_A_NESTED
, .optional
= true },
3775 [OVS_FLOW_ATTR_MASK
] = { .type
= NL_A_NESTED
, .optional
= true },
3776 [OVS_FLOW_ATTR_ACTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
3777 [OVS_FLOW_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_flow_stats
),
3779 [OVS_FLOW_ATTR_TCP_FLAGS
] = { .type
= NL_A_U8
, .optional
= true },
3780 [OVS_FLOW_ATTR_USED
] = { .type
= NL_A_U64
, .optional
= true },
3781 [OVS_FLOW_ATTR_UFID
] = { .type
= NL_A_U128
, .optional
= true },
3782 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
3783 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
3784 /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
3787 dpif_netlink_flow_init(flow
);
3789 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
3790 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
3791 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
3792 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
3794 struct nlattr
*a
[ARRAY_SIZE(ovs_flow_policy
)];
3795 if (!nlmsg
|| !genl
|| !ovs_header
3796 || nlmsg
->nlmsg_type
!= ovs_flow_family
3797 || !nl_policy_parse(&b
, 0, ovs_flow_policy
, a
,
3798 ARRAY_SIZE(ovs_flow_policy
))) {
3801 if (!a
[OVS_FLOW_ATTR_KEY
] && !a
[OVS_FLOW_ATTR_UFID
]) {
3805 flow
->nlmsg_flags
= nlmsg
->nlmsg_flags
;
3806 flow
->dp_ifindex
= ovs_header
->dp_ifindex
;
3807 if (a
[OVS_FLOW_ATTR_KEY
]) {
3808 flow
->key
= nl_attr_get(a
[OVS_FLOW_ATTR_KEY
]);
3809 flow
->key_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_KEY
]);
3812 if (a
[OVS_FLOW_ATTR_UFID
]) {
3813 flow
->ufid
= nl_attr_get_u128(a
[OVS_FLOW_ATTR_UFID
]);
3814 flow
->ufid_present
= true;
3816 if (a
[OVS_FLOW_ATTR_MASK
]) {
3817 flow
->mask
= nl_attr_get(a
[OVS_FLOW_ATTR_MASK
]);
3818 flow
->mask_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_MASK
]);
3820 if (a
[OVS_FLOW_ATTR_ACTIONS
]) {
3821 flow
->actions
= nl_attr_get(a
[OVS_FLOW_ATTR_ACTIONS
]);
3822 flow
->actions_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_ACTIONS
]);
3824 if (a
[OVS_FLOW_ATTR_STATS
]) {
3825 flow
->stats
= nl_attr_get(a
[OVS_FLOW_ATTR_STATS
]);
3827 if (a
[OVS_FLOW_ATTR_TCP_FLAGS
]) {
3828 flow
->tcp_flags
= nl_attr_get(a
[OVS_FLOW_ATTR_TCP_FLAGS
]);
3830 if (a
[OVS_FLOW_ATTR_USED
]) {
3831 flow
->used
= nl_attr_get(a
[OVS_FLOW_ATTR_USED
]);
3838 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
3839 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
3840 * OVS_KEY_ATTR_ETHERTYPE. Puts 'data' to 'buf'.
3843 put_exclude_packet_type(struct ofpbuf
*buf
, uint16_t type
,
3844 const struct nlattr
*data
, uint16_t data_len
)
3846 const struct nlattr
*packet_type
;
3848 packet_type
= nl_attr_find__(data
, data_len
, OVS_KEY_ATTR_PACKET_TYPE
);
3851 /* exclude PACKET_TYPE Netlink attribute. */
3852 ovs_assert(NLA_ALIGN(packet_type
->nla_len
) == NL_A_U32_SIZE
);
3853 size_t packet_type_len
= NL_A_U32_SIZE
;
3854 size_t first_chunk_size
= (uint8_t *)packet_type
- (uint8_t *)data
;
3855 size_t second_chunk_size
= data_len
- first_chunk_size
3857 struct nlattr
*next_attr
= nl_attr_next(packet_type
);
3860 ofs
= nl_msg_start_nested(buf
, type
);
3861 nl_msg_put(buf
, data
, first_chunk_size
);
3862 nl_msg_put(buf
, next_attr
, second_chunk_size
);
3863 if (!nl_attr_find__(data
, data_len
, OVS_KEY_ATTR_ETHERNET
)) {
3864 ovs_be16 pt
= pt_ns_type_be(nl_attr_get_be32(packet_type
));
3865 const struct nlattr
*nla
;
3867 nla
= nl_attr_find(buf
, NLA_HDRLEN
, OVS_KEY_ATTR_ETHERTYPE
);
3869 ovs_be16
*ethertype
;
3871 ethertype
= CONST_CAST(ovs_be16
*, nl_attr_get(nla
));
3874 nl_msg_put_be16(buf
, OVS_KEY_ATTR_ETHERTYPE
, pt
);
3877 nl_msg_end_nested(buf
, ofs
);
3879 nl_msg_put_unspec(buf
, type
, data
, data_len
);
3883 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
3884 * followed by Netlink attributes corresponding to 'flow'. */
3886 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow
*flow
,
3889 struct ovs_header
*ovs_header
;
3891 nl_msg_put_genlmsghdr(buf
, 0, ovs_flow_family
,
3892 NLM_F_REQUEST
| flow
->nlmsg_flags
,
3893 flow
->cmd
, OVS_FLOW_VERSION
);
3895 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
3896 ovs_header
->dp_ifindex
= flow
->dp_ifindex
;
3898 if (flow
->ufid_present
) {
3899 nl_msg_put_u128(buf
, OVS_FLOW_ATTR_UFID
, flow
->ufid
);
3901 if (flow
->ufid_terse
) {
3902 nl_msg_put_u32(buf
, OVS_FLOW_ATTR_UFID_FLAGS
,
3903 OVS_UFID_F_OMIT_KEY
| OVS_UFID_F_OMIT_MASK
3904 | OVS_UFID_F_OMIT_ACTIONS
);
3906 if (!flow
->ufid_terse
|| !flow
->ufid_present
) {
3907 if (flow
->key_len
) {
3908 put_exclude_packet_type(buf
, OVS_FLOW_ATTR_KEY
, flow
->key
,
3911 if (flow
->mask_len
) {
3912 put_exclude_packet_type(buf
, OVS_FLOW_ATTR_MASK
, flow
->mask
,
3915 if (flow
->actions
|| flow
->actions_len
) {
3916 nl_msg_put_unspec(buf
, OVS_FLOW_ATTR_ACTIONS
,
3917 flow
->actions
, flow
->actions_len
);
3921 /* We never need to send these to the kernel. */
3922 ovs_assert(!flow
->stats
);
3923 ovs_assert(!flow
->tcp_flags
);
3924 ovs_assert(!flow
->used
);
3927 nl_msg_put_flag(buf
, OVS_FLOW_ATTR_CLEAR
);
3930 nl_msg_put_flag(buf
, OVS_FLOW_ATTR_PROBE
);
3934 /* Clears 'flow' to "empty" values. */
3936 dpif_netlink_flow_init(struct dpif_netlink_flow
*flow
)
3938 memset(flow
, 0, sizeof *flow
);
3941 /* Executes 'request' in the kernel datapath. If the command fails, returns a
3942 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
3943 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
3944 * result of the command is expected to be a flow also, which is decoded and
3945 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
3946 * is no longer needed ('reply' will contain pointers into '*bufp'). */
3948 dpif_netlink_flow_transact(struct dpif_netlink_flow
*request
,
3949 struct dpif_netlink_flow
*reply
,
3950 struct ofpbuf
**bufp
)
3952 struct ofpbuf
*request_buf
;
3955 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
3958 request
->nlmsg_flags
|= NLM_F_ECHO
;
3961 request_buf
= ofpbuf_new(1024);
3962 dpif_netlink_flow_to_ofpbuf(request
, request_buf
);
3963 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
3964 ofpbuf_delete(request_buf
);
3968 error
= dpif_netlink_flow_from_ofpbuf(reply
, *bufp
);
3971 dpif_netlink_flow_init(reply
);
3972 ofpbuf_delete(*bufp
);
3980 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow
*flow
,
3981 struct dpif_flow_stats
*stats
)
3984 stats
->n_packets
= get_32aligned_u64(&flow
->stats
->n_packets
);
3985 stats
->n_bytes
= get_32aligned_u64(&flow
->stats
->n_bytes
);
3987 stats
->n_packets
= 0;
3990 stats
->used
= flow
->used
? get_32aligned_u64(flow
->used
) : 0;
3991 stats
->tcp_flags
= flow
->tcp_flags
? *flow
->tcp_flags
: 0;
3994 /* Logs information about a packet that was recently lost in 'ch' (in
3997 report_loss(struct dpif_netlink
*dpif
, struct dpif_channel
*ch
, uint32_t ch_idx
,
3998 uint32_t handler_id
)
4000 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 5);
4003 if (VLOG_DROP_WARN(&rl
)) {
4008 if (ch
->last_poll
!= LLONG_MIN
) {
4009 ds_put_format(&s
, " (last polled %lld ms ago)",
4010 time_msec() - ch
->last_poll
);
4013 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
4014 dpif_name(&dpif
->dpif
), ch_idx
, handler_id
);