2 * Copyright (c) 2008-2018 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "dpif-netlink.h"
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
31 #include <sys/epoll.h>
36 #include "dpif-netlink-rtnl.h"
37 #include "dpif-provider.h"
38 #include "fat-rwlock.h"
40 #include "netdev-linux.h"
41 #include "netdev-offload.h"
42 #include "netdev-provider.h"
43 #include "netdev-vport.h"
45 #include "netlink-conntrack.h"
46 #include "netlink-notifier.h"
47 #include "netlink-socket.h"
51 #include "openvswitch/dynamic-string.h"
52 #include "openvswitch/flow.h"
53 #include "openvswitch/hmap.h"
54 #include "openvswitch/match.h"
55 #include "openvswitch/ofpbuf.h"
56 #include "openvswitch/poll-loop.h"
57 #include "openvswitch/shash.h"
58 #include "openvswitch/thread.h"
59 #include "openvswitch/vlog.h"
64 #include "unaligned.h"
67 VLOG_DEFINE_THIS_MODULE(dpif_netlink
);
74 enum { MAX_PORTS
= USHRT_MAX
};
76 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
77 * missing if we have old headers. */
78 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
80 #define FLOW_DUMP_MAX_BATCH 50
81 #define OPERATE_MAX_OPS 50
83 #ifndef EPOLLEXCLUSIVE
84 #define EPOLLEXCLUSIVE (1u << 28)
87 struct dpif_netlink_dp
{
88 /* Generic Netlink header. */
91 /* struct ovs_header. */
95 const char *name
; /* OVS_DP_ATTR_NAME. */
96 const uint32_t *upcall_pid
; /* OVS_DP_ATTR_UPCALL_PID. */
97 uint32_t user_features
; /* OVS_DP_ATTR_USER_FEATURES */
98 const struct ovs_dp_stats
*stats
; /* OVS_DP_ATTR_STATS. */
99 const struct ovs_dp_megaflow_stats
*megaflow_stats
;
100 /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
103 static void dpif_netlink_dp_init(struct dpif_netlink_dp
*);
104 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp
*,
105 const struct ofpbuf
*);
106 static void dpif_netlink_dp_dump_start(struct nl_dump
*);
107 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp
*request
,
108 struct dpif_netlink_dp
*reply
,
109 struct ofpbuf
**bufp
);
110 static int dpif_netlink_dp_get(const struct dpif
*,
111 struct dpif_netlink_dp
*reply
,
112 struct ofpbuf
**bufp
);
114 dpif_netlink_set_features(struct dpif
*dpif_
, uint32_t new_features
);
116 struct dpif_netlink_flow
{
117 /* Generic Netlink header. */
120 /* struct ovs_header. */
121 unsigned int nlmsg_flags
;
126 * The 'stats' member points to 64-bit data that might only be aligned on
127 * 32-bit boundaries, so get_unaligned_u64() should be used to access its
130 * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
131 * the Netlink version of the command, even if actions_len is zero. */
132 const struct nlattr
*key
; /* OVS_FLOW_ATTR_KEY. */
134 const struct nlattr
*mask
; /* OVS_FLOW_ATTR_MASK. */
136 const struct nlattr
*actions
; /* OVS_FLOW_ATTR_ACTIONS. */
138 ovs_u128 ufid
; /* OVS_FLOW_ATTR_FLOW_ID. */
139 bool ufid_present
; /* Is there a UFID? */
140 bool ufid_terse
; /* Skip serializing key/mask/acts? */
141 const struct ovs_flow_stats
*stats
; /* OVS_FLOW_ATTR_STATS. */
142 const uint8_t *tcp_flags
; /* OVS_FLOW_ATTR_TCP_FLAGS. */
143 const ovs_32aligned_u64
*used
; /* OVS_FLOW_ATTR_USED. */
144 bool clear
; /* OVS_FLOW_ATTR_CLEAR. */
145 bool probe
; /* OVS_FLOW_ATTR_PROBE. */
148 static void dpif_netlink_flow_init(struct dpif_netlink_flow
*);
149 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow
*,
150 const struct ofpbuf
*);
151 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow
*,
153 static int dpif_netlink_flow_transact(struct dpif_netlink_flow
*request
,
154 struct dpif_netlink_flow
*reply
,
155 struct ofpbuf
**bufp
);
156 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow
*,
157 struct dpif_flow_stats
*);
158 static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow
*,
159 const struct dpif_netlink_flow
*);
161 /* One of the dpif channels between the kernel and userspace. */
162 struct dpif_channel
{
163 struct nl_sock
*sock
; /* Netlink socket. */
164 long long int last_poll
; /* Last time this channel was polled. */
168 #define VPORT_SOCK_POOL_SIZE 1
169 /* On Windows, there is no native support for epoll. There are equivalent
170 * interfaces though, that are not used currently. For simplicity, a pool of
171 * netlink sockets is used. Each socket is represented by 'struct
172 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
173 * sharing the same socket. In the future, we can add a reference count and
175 struct dpif_windows_vport_sock
{
176 struct nl_sock
*nl_sock
; /* netlink socket. */
180 struct dpif_handler
{
181 struct epoll_event
*epoll_events
;
182 int epoll_fd
; /* epoll fd that includes channel socks. */
183 int n_events
; /* Num events returned by epoll_wait(). */
184 int event_offset
; /* Offset into 'epoll_events'. */
187 /* Pool of sockets. */
188 struct dpif_windows_vport_sock
*vport_sock_pool
;
189 size_t last_used_pool_idx
; /* Index to aid in allocating a
190 socket in the pool to a port. */
194 /* Datapath interface for the openvswitch Linux kernel module. */
195 struct dpif_netlink
{
198 uint32_t user_features
;
200 /* Upcall messages. */
201 struct fat_rwlock upcall_lock
;
202 struct dpif_handler
*handlers
;
203 uint32_t n_handlers
; /* Num of upcall handlers. */
204 struct dpif_channel
*channels
; /* Array of channels for each port. */
205 int uc_array_size
; /* Size of 'handler->channels' and */
206 /* 'handler->epoll_events'. */
208 /* Change notification. */
209 struct nl_sock
*port_notifier
; /* vport multicast group subscriber. */
210 bool refresh_channels
;
213 static void report_loss(struct dpif_netlink
*, struct dpif_channel
*,
214 uint32_t ch_idx
, uint32_t handler_id
);
216 static struct vlog_rate_limit error_rl
= VLOG_RATE_LIMIT_INIT(9999, 5);
218 /* Generic Netlink family numbers for OVS.
220 * Initialized by dpif_netlink_init(). */
221 static int ovs_datapath_family
;
222 static int ovs_vport_family
;
223 static int ovs_flow_family
;
224 static int ovs_packet_family
;
225 static int ovs_meter_family
;
226 static int ovs_ct_limit_family
;
228 /* Generic Netlink multicast groups for OVS.
230 * Initialized by dpif_netlink_init(). */
231 static unsigned int ovs_vport_mcgroup
;
233 /* If true, tunnel devices are created using OVS compat/genetlink.
234 * If false, tunnel devices are created with rtnetlink and using light weight
235 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
236 * to using the compat interface. */
237 static bool ovs_tunnels_out_of_tree
= true;
239 static int dpif_netlink_init(void);
240 static int open_dpif(const struct dpif_netlink_dp
*, struct dpif
**);
241 static uint32_t dpif_netlink_port_get_pid(const struct dpif
*,
243 static void dpif_netlink_handler_uninit(struct dpif_handler
*handler
);
244 static int dpif_netlink_refresh_channels(struct dpif_netlink
*,
245 uint32_t n_handlers
);
246 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport
*,
248 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport
*,
249 const struct ofpbuf
*);
250 static int dpif_netlink_port_query__(const struct dpif_netlink
*dpif
,
251 odp_port_t port_no
, const char *port_name
,
252 struct dpif_port
*dpif_port
);
255 create_nl_sock(struct dpif_netlink
*dpif OVS_UNUSED
, struct nl_sock
**sockp
)
256 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
259 return nl_sock_create(NETLINK_GENERIC
, sockp
);
261 /* Pick netlink sockets to use in a round-robin fashion from each
262 * handler's pool of sockets. */
263 struct dpif_handler
*handler
= &dpif
->handlers
[0];
264 struct dpif_windows_vport_sock
*sock_pool
= handler
->vport_sock_pool
;
265 size_t index
= handler
->last_used_pool_idx
;
267 /* A pool of sockets is allocated when the handler is initialized. */
268 if (sock_pool
== NULL
) {
273 ovs_assert(index
< VPORT_SOCK_POOL_SIZE
);
274 *sockp
= sock_pool
[index
].nl_sock
;
276 index
= (index
== VPORT_SOCK_POOL_SIZE
- 1) ? 0 : index
+ 1;
277 handler
->last_used_pool_idx
= index
;
283 close_nl_sock(struct nl_sock
*sock
)
286 nl_sock_destroy(sock
);
290 static struct dpif_netlink
*
291 dpif_netlink_cast(const struct dpif
*dpif
)
293 dpif_assert_class(dpif
, &dpif_netlink_class
);
294 return CONTAINER_OF(dpif
, struct dpif_netlink
, dpif
);
298 dpif_netlink_enumerate(struct sset
*all_dps
,
299 const struct dpif_class
*dpif_class OVS_UNUSED
)
302 uint64_t reply_stub
[NL_DUMP_BUFSIZE
/ 8];
303 struct ofpbuf msg
, buf
;
306 error
= dpif_netlink_init();
311 ofpbuf_use_stub(&buf
, reply_stub
, sizeof reply_stub
);
312 dpif_netlink_dp_dump_start(&dump
);
313 while (nl_dump_next(&dump
, &msg
, &buf
)) {
314 struct dpif_netlink_dp dp
;
316 if (!dpif_netlink_dp_from_ofpbuf(&dp
, &msg
)) {
317 sset_add(all_dps
, dp
.name
);
321 return nl_dump_done(&dump
);
325 dpif_netlink_open(const struct dpif_class
*class OVS_UNUSED
, const char *name
,
326 bool create
, struct dpif
**dpifp
)
328 struct dpif_netlink_dp dp_request
, dp
;
333 error
= dpif_netlink_init();
338 /* Create or look up datapath. */
339 dpif_netlink_dp_init(&dp_request
);
341 dp_request
.upcall_pid
= &upcall_pid
;
342 dp_request
.name
= name
;
345 dp_request
.cmd
= OVS_DP_CMD_NEW
;
347 dp_request
.cmd
= OVS_DP_CMD_GET
;
349 error
= dpif_netlink_dp_transact(&dp_request
, &dp
, &buf
);
353 dp_request
.user_features
= dp
.user_features
;
356 /* Use OVS_DP_CMD_SET to report user features */
357 dp_request
.cmd
= OVS_DP_CMD_SET
;
360 dp_request
.user_features
|= OVS_DP_F_UNALIGNED
;
361 dp_request
.user_features
|= OVS_DP_F_VPORT_PIDS
;
362 error
= dpif_netlink_dp_transact(&dp_request
, &dp
, &buf
);
367 error
= open_dpif(&dp
, dpifp
);
368 dpif_netlink_set_features(*dpifp
, OVS_DP_F_TC_RECIRC_SHARING
);
375 open_dpif(const struct dpif_netlink_dp
*dp
, struct dpif
**dpifp
)
377 struct dpif_netlink
*dpif
;
379 dpif
= xzalloc(sizeof *dpif
);
380 dpif
->port_notifier
= NULL
;
381 fat_rwlock_init(&dpif
->upcall_lock
);
383 dpif_init(&dpif
->dpif
, &dpif_netlink_class
, dp
->name
,
384 dp
->dp_ifindex
, dp
->dp_ifindex
);
386 dpif
->dp_ifindex
= dp
->dp_ifindex
;
387 dpif
->user_features
= dp
->user_features
;
388 *dpifp
= &dpif
->dpif
;
395 vport_delete_sock_pool(struct dpif_handler
*handler
)
396 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
398 if (handler
->vport_sock_pool
) {
400 struct dpif_windows_vport_sock
*sock_pool
=
401 handler
->vport_sock_pool
;
403 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
404 if (sock_pool
[i
].nl_sock
) {
405 nl_sock_unsubscribe_packets(sock_pool
[i
].nl_sock
);
406 nl_sock_destroy(sock_pool
[i
].nl_sock
);
407 sock_pool
[i
].nl_sock
= NULL
;
411 free(handler
->vport_sock_pool
);
412 handler
->vport_sock_pool
= NULL
;
417 vport_create_sock_pool(struct dpif_handler
*handler
)
418 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
420 struct dpif_windows_vport_sock
*sock_pool
;
424 sock_pool
= xzalloc(VPORT_SOCK_POOL_SIZE
* sizeof *sock_pool
);
425 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
426 error
= nl_sock_create(NETLINK_GENERIC
, &sock_pool
[i
].nl_sock
);
431 /* Enable the netlink socket to receive packets. This is equivalent to
432 * calling nl_sock_join_mcgroup() to receive events. */
433 error
= nl_sock_subscribe_packets(sock_pool
[i
].nl_sock
);
439 handler
->vport_sock_pool
= sock_pool
;
440 handler
->last_used_pool_idx
= 0;
444 vport_delete_sock_pool(handler
);
449 /* Given the port number 'port_idx', extracts the pid of netlink socket
450 * associated to the port and assigns it to 'upcall_pid'. */
452 vport_get_pid(struct dpif_netlink
*dpif
, uint32_t port_idx
,
453 uint32_t *upcall_pid
)
455 /* Since the nl_sock can only be assigned in either all
456 * or none "dpif" channels, the following check
458 if (!dpif
->channels
[port_idx
].sock
) {
461 ovs_assert(!WINDOWS
|| dpif
->n_handlers
<= 1);
463 *upcall_pid
= nl_sock_pid(dpif
->channels
[port_idx
].sock
);
469 vport_add_channel(struct dpif_netlink
*dpif
, odp_port_t port_no
,
470 struct nl_sock
*sock
)
472 struct epoll_event event
;
473 uint32_t port_idx
= odp_to_u32(port_no
);
477 if (dpif
->handlers
== NULL
) {
482 /* We assume that the datapath densely chooses port numbers, which can
483 * therefore be used as an index into 'channels' and 'epoll_events' of
485 if (port_idx
>= dpif
->uc_array_size
) {
486 uint32_t new_size
= port_idx
+ 1;
488 if (new_size
> MAX_PORTS
) {
489 VLOG_WARN_RL(&error_rl
, "%s: datapath port %"PRIu32
" too big",
490 dpif_name(&dpif
->dpif
), port_no
);
494 dpif
->channels
= xrealloc(dpif
->channels
,
495 new_size
* sizeof *dpif
->channels
);
497 for (i
= dpif
->uc_array_size
; i
< new_size
; i
++) {
498 dpif
->channels
[i
].sock
= NULL
;
501 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
502 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
504 handler
->epoll_events
= xrealloc(handler
->epoll_events
,
505 new_size
* sizeof *handler
->epoll_events
);
508 dpif
->uc_array_size
= new_size
;
511 memset(&event
, 0, sizeof event
);
512 event
.events
= EPOLLIN
| EPOLLEXCLUSIVE
;
513 event
.data
.u32
= port_idx
;
515 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
516 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
519 if (epoll_ctl(handler
->epoll_fd
, EPOLL_CTL_ADD
, nl_sock_fd(sock
),
526 dpif
->channels
[port_idx
].sock
= sock
;
527 dpif
->channels
[port_idx
].last_poll
= LLONG_MIN
;
534 epoll_ctl(dpif
->handlers
[i
].epoll_fd
, EPOLL_CTL_DEL
,
535 nl_sock_fd(sock
), NULL
);
538 dpif
->channels
[port_idx
].sock
= NULL
;
544 vport_del_channels(struct dpif_netlink
*dpif
, odp_port_t port_no
)
546 uint32_t port_idx
= odp_to_u32(port_no
);
549 if (!dpif
->handlers
|| port_idx
>= dpif
->uc_array_size
550 || !dpif
->channels
[port_idx
].sock
) {
554 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
555 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
557 epoll_ctl(handler
->epoll_fd
, EPOLL_CTL_DEL
,
558 nl_sock_fd(dpif
->channels
[port_idx
].sock
), NULL
);
560 handler
->event_offset
= handler
->n_events
= 0;
563 nl_sock_destroy(dpif
->channels
[port_idx
].sock
);
565 dpif
->channels
[port_idx
].sock
= NULL
;
569 destroy_all_channels(struct dpif_netlink
*dpif
)
570 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
574 if (!dpif
->handlers
) {
578 for (i
= 0; i
< dpif
->uc_array_size
; i
++ ) {
579 struct dpif_netlink_vport vport_request
;
580 uint32_t upcall_pids
= 0;
582 if (!dpif
->channels
[i
].sock
) {
586 /* Turn off upcalls. */
587 dpif_netlink_vport_init(&vport_request
);
588 vport_request
.cmd
= OVS_VPORT_CMD_SET
;
589 vport_request
.dp_ifindex
= dpif
->dp_ifindex
;
590 vport_request
.port_no
= u32_to_odp(i
);
591 vport_request
.n_upcall_pids
= 1;
592 vport_request
.upcall_pids
= &upcall_pids
;
593 dpif_netlink_vport_transact(&vport_request
, NULL
, NULL
);
595 vport_del_channels(dpif
, u32_to_odp(i
));
598 for (i
= 0; i
< dpif
->n_handlers
; i
++) {
599 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
601 dpif_netlink_handler_uninit(handler
);
602 free(handler
->epoll_events
);
604 free(dpif
->channels
);
605 free(dpif
->handlers
);
606 dpif
->handlers
= NULL
;
607 dpif
->channels
= NULL
;
608 dpif
->n_handlers
= 0;
609 dpif
->uc_array_size
= 0;
613 dpif_netlink_close(struct dpif
*dpif_
)
615 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
617 nl_sock_destroy(dpif
->port_notifier
);
619 fat_rwlock_wrlock(&dpif
->upcall_lock
);
620 destroy_all_channels(dpif
);
621 fat_rwlock_unlock(&dpif
->upcall_lock
);
623 fat_rwlock_destroy(&dpif
->upcall_lock
);
628 dpif_netlink_destroy(struct dpif
*dpif_
)
630 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
631 struct dpif_netlink_dp dp
;
633 dpif_netlink_dp_init(&dp
);
634 dp
.cmd
= OVS_DP_CMD_DEL
;
635 dp
.dp_ifindex
= dpif
->dp_ifindex
;
636 return dpif_netlink_dp_transact(&dp
, NULL
, NULL
);
640 dpif_netlink_run(struct dpif
*dpif_
)
642 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
644 if (dpif
->refresh_channels
) {
645 dpif
->refresh_channels
= false;
646 fat_rwlock_wrlock(&dpif
->upcall_lock
);
647 dpif_netlink_refresh_channels(dpif
, dpif
->n_handlers
);
648 fat_rwlock_unlock(&dpif
->upcall_lock
);
654 dpif_netlink_get_stats(const struct dpif
*dpif_
, struct dpif_dp_stats
*stats
)
656 struct dpif_netlink_dp dp
;
660 error
= dpif_netlink_dp_get(dpif_
, &dp
, &buf
);
662 memset(stats
, 0, sizeof *stats
);
665 stats
->n_hit
= get_32aligned_u64(&dp
.stats
->n_hit
);
666 stats
->n_missed
= get_32aligned_u64(&dp
.stats
->n_missed
);
667 stats
->n_lost
= get_32aligned_u64(&dp
.stats
->n_lost
);
668 stats
->n_flows
= get_32aligned_u64(&dp
.stats
->n_flows
);
671 if (dp
.megaflow_stats
) {
672 stats
->n_masks
= dp
.megaflow_stats
->n_masks
;
673 stats
->n_mask_hit
= get_32aligned_u64(
674 &dp
.megaflow_stats
->n_mask_hit
);
676 stats
->n_masks
= UINT32_MAX
;
677 stats
->n_mask_hit
= UINT64_MAX
;
685 dpif_netlink_set_features(struct dpif
*dpif_
, uint32_t new_features
)
687 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
688 struct dpif_netlink_dp request
, reply
;
692 dpif_netlink_dp_init(&request
);
693 request
.cmd
= OVS_DP_CMD_SET
;
694 request
.name
= dpif_
->base_name
;
695 request
.dp_ifindex
= dpif
->dp_ifindex
;
696 request
.user_features
= dpif
->user_features
| new_features
;
698 error
= dpif_netlink_dp_transact(&request
, &reply
, &bufp
);
700 dpif
->user_features
= reply
.user_features
;
702 if (!(dpif
->user_features
& new_features
)) {
711 get_vport_type(const struct dpif_netlink_vport
*vport
)
713 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
715 switch (vport
->type
) {
716 case OVS_VPORT_TYPE_NETDEV
: {
717 const char *type
= netdev_get_type_from_name(vport
->name
);
719 return type
? type
: "system";
722 case OVS_VPORT_TYPE_INTERNAL
:
725 case OVS_VPORT_TYPE_GENEVE
:
728 case OVS_VPORT_TYPE_GRE
:
731 case OVS_VPORT_TYPE_VXLAN
:
734 case OVS_VPORT_TYPE_LISP
:
737 case OVS_VPORT_TYPE_STT
:
740 case OVS_VPORT_TYPE_ERSPAN
:
743 case OVS_VPORT_TYPE_IP6ERSPAN
:
746 case OVS_VPORT_TYPE_IP6GRE
:
749 case OVS_VPORT_TYPE_GTPU
:
752 case OVS_VPORT_TYPE_UNSPEC
:
753 case __OVS_VPORT_TYPE_MAX
:
757 VLOG_WARN_RL(&rl
, "dp%d: port `%s' has unsupported type %u",
758 vport
->dp_ifindex
, vport
->name
, (unsigned int) vport
->type
);
763 netdev_to_ovs_vport_type(const char *type
)
765 if (!strcmp(type
, "tap") || !strcmp(type
, "system")) {
766 return OVS_VPORT_TYPE_NETDEV
;
767 } else if (!strcmp(type
, "internal")) {
768 return OVS_VPORT_TYPE_INTERNAL
;
769 } else if (strstr(type
, "stt")) {
770 return OVS_VPORT_TYPE_STT
;
771 } else if (!strcmp(type
, "geneve")) {
772 return OVS_VPORT_TYPE_GENEVE
;
773 } else if (!strcmp(type
, "vxlan")) {
774 return OVS_VPORT_TYPE_VXLAN
;
775 } else if (!strcmp(type
, "lisp")) {
776 return OVS_VPORT_TYPE_LISP
;
777 } else if (!strcmp(type
, "erspan")) {
778 return OVS_VPORT_TYPE_ERSPAN
;
779 } else if (!strcmp(type
, "ip6erspan")) {
780 return OVS_VPORT_TYPE_IP6ERSPAN
;
781 } else if (!strcmp(type
, "ip6gre")) {
782 return OVS_VPORT_TYPE_IP6GRE
;
783 } else if (!strcmp(type
, "gre")) {
784 return OVS_VPORT_TYPE_GRE
;
785 } else if (!strcmp(type
, "gtpu")) {
786 return OVS_VPORT_TYPE_GTPU
;
788 return OVS_VPORT_TYPE_UNSPEC
;
793 dpif_netlink_port_add__(struct dpif_netlink
*dpif
, const char *name
,
794 enum ovs_vport_type type
,
795 struct ofpbuf
*options
,
796 odp_port_t
*port_nop
)
797 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
799 struct dpif_netlink_vport request
, reply
;
801 struct nl_sock
*sock
= NULL
;
802 uint32_t upcall_pids
= 0;
805 if (dpif
->handlers
) {
806 error
= create_nl_sock(dpif
, &sock
);
812 dpif_netlink_vport_init(&request
);
813 request
.cmd
= OVS_VPORT_CMD_NEW
;
814 request
.dp_ifindex
= dpif
->dp_ifindex
;
818 request
.port_no
= *port_nop
;
820 upcall_pids
= nl_sock_pid(sock
);
822 request
.n_upcall_pids
= 1;
823 request
.upcall_pids
= &upcall_pids
;
826 request
.options
= options
->data
;
827 request
.options_len
= options
->size
;
830 error
= dpif_netlink_vport_transact(&request
, &reply
, &buf
);
832 *port_nop
= reply
.port_no
;
834 if (error
== EBUSY
&& *port_nop
!= ODPP_NONE
) {
835 VLOG_INFO("%s: requested port %"PRIu32
" is in use",
836 dpif_name(&dpif
->dpif
), *port_nop
);
843 error
= vport_add_channel(dpif
, *port_nop
, sock
);
845 VLOG_INFO("%s: could not add channel for port %s",
846 dpif_name(&dpif
->dpif
), name
);
848 /* Delete the port. */
849 dpif_netlink_vport_init(&request
);
850 request
.cmd
= OVS_VPORT_CMD_DEL
;
851 request
.dp_ifindex
= dpif
->dp_ifindex
;
852 request
.port_no
= *port_nop
;
853 dpif_netlink_vport_transact(&request
, NULL
, NULL
);
865 dpif_netlink_port_add_compat(struct dpif_netlink
*dpif
, struct netdev
*netdev
,
866 odp_port_t
*port_nop
)
867 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
869 const struct netdev_tunnel_config
*tnl_cfg
;
870 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
871 const char *type
= netdev_get_type(netdev
);
872 uint64_t options_stub
[64 / 8];
873 enum ovs_vport_type ovs_type
;
874 struct ofpbuf options
;
877 name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
879 ovs_type
= netdev_to_ovs_vport_type(netdev_get_type(netdev
));
880 if (ovs_type
== OVS_VPORT_TYPE_UNSPEC
) {
881 VLOG_WARN_RL(&error_rl
, "%s: cannot create port `%s' because it has "
882 "unsupported type `%s'",
883 dpif_name(&dpif
->dpif
), name
, type
);
887 if (ovs_type
== OVS_VPORT_TYPE_NETDEV
) {
889 /* XXX : Map appropriate Windows handle */
891 netdev_linux_ethtool_set_flag(netdev
, ETH_FLAG_LRO
, "LRO", false);
896 if (ovs_type
== OVS_VPORT_TYPE_INTERNAL
) {
897 if (!create_wmi_port(name
)){
898 VLOG_ERR("Could not create wmi internal port with name:%s", name
);
904 tnl_cfg
= netdev_get_tunnel_config(netdev
);
905 if (tnl_cfg
&& (tnl_cfg
->dst_port
!= 0 || tnl_cfg
->exts
)) {
906 ofpbuf_use_stack(&options
, options_stub
, sizeof options_stub
);
907 if (tnl_cfg
->dst_port
) {
908 nl_msg_put_u16(&options
, OVS_TUNNEL_ATTR_DST_PORT
,
909 ntohs(tnl_cfg
->dst_port
));
915 ext_ofs
= nl_msg_start_nested(&options
, OVS_TUNNEL_ATTR_EXTENSION
);
916 for (i
= 0; i
< 32; i
++) {
917 if (tnl_cfg
->exts
& (1 << i
)) {
918 nl_msg_put_flag(&options
, i
);
921 nl_msg_end_nested(&options
, ext_ofs
);
923 return dpif_netlink_port_add__(dpif
, name
, ovs_type
, &options
,
926 return dpif_netlink_port_add__(dpif
, name
, ovs_type
, NULL
, port_nop
);
932 dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink
*dpif
,
933 struct netdev
*netdev
,
934 odp_port_t
*port_nop
)
935 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
937 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
938 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
942 error
= dpif_netlink_rtnl_port_create(netdev
);
944 if (error
!= EOPNOTSUPP
) {
945 VLOG_WARN_RL(&rl
, "Failed to create %s with rtnetlink: %s",
946 netdev_get_name(netdev
), ovs_strerror(error
));
951 name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
952 error
= dpif_netlink_port_add__(dpif
, name
, OVS_VPORT_TYPE_NETDEV
, NULL
,
955 dpif_netlink_rtnl_port_destroy(name
, netdev_get_type(netdev
));
961 dpif_netlink_port_add(struct dpif
*dpif_
, struct netdev
*netdev
,
962 odp_port_t
*port_nop
)
964 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
965 int error
= EOPNOTSUPP
;
967 fat_rwlock_wrlock(&dpif
->upcall_lock
);
968 if (!ovs_tunnels_out_of_tree
) {
969 error
= dpif_netlink_rtnl_port_create_and_add(dpif
, netdev
, port_nop
);
972 error
= dpif_netlink_port_add_compat(dpif
, netdev
, port_nop
);
974 fat_rwlock_unlock(&dpif
->upcall_lock
);
980 dpif_netlink_port_del__(struct dpif_netlink
*dpif
, odp_port_t port_no
)
981 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
983 struct dpif_netlink_vport vport
;
984 struct dpif_port dpif_port
;
987 error
= dpif_netlink_port_query__(dpif
, port_no
, NULL
, &dpif_port
);
992 dpif_netlink_vport_init(&vport
);
993 vport
.cmd
= OVS_VPORT_CMD_DEL
;
994 vport
.dp_ifindex
= dpif
->dp_ifindex
;
995 vport
.port_no
= port_no
;
997 if (!strcmp(dpif_port
.type
, "internal")) {
998 if (!delete_wmi_port(dpif_port
.name
)) {
999 VLOG_ERR("Could not delete wmi port with name: %s",
1004 error
= dpif_netlink_vport_transact(&vport
, NULL
, NULL
);
1006 vport_del_channels(dpif
, port_no
);
1008 if (!error
&& !ovs_tunnels_out_of_tree
) {
1009 error
= dpif_netlink_rtnl_port_destroy(dpif_port
.name
, dpif_port
.type
);
1010 if (error
== EOPNOTSUPP
) {
1015 dpif_port_destroy(&dpif_port
);
1021 dpif_netlink_port_del(struct dpif
*dpif_
, odp_port_t port_no
)
1023 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1026 fat_rwlock_wrlock(&dpif
->upcall_lock
);
1027 error
= dpif_netlink_port_del__(dpif
, port_no
);
1028 fat_rwlock_unlock(&dpif
->upcall_lock
);
1034 dpif_netlink_port_query__(const struct dpif_netlink
*dpif
, odp_port_t port_no
,
1035 const char *port_name
, struct dpif_port
*dpif_port
)
1037 struct dpif_netlink_vport request
;
1038 struct dpif_netlink_vport reply
;
1042 dpif_netlink_vport_init(&request
);
1043 request
.cmd
= OVS_VPORT_CMD_GET
;
1044 request
.dp_ifindex
= dpif
->dp_ifindex
;
1045 request
.port_no
= port_no
;
1046 request
.name
= port_name
;
1048 error
= dpif_netlink_vport_transact(&request
, &reply
, &buf
);
1050 if (reply
.dp_ifindex
!= request
.dp_ifindex
) {
1051 /* A query by name reported that 'port_name' is in some datapath
1052 * other than 'dpif', but the caller wants to know about 'dpif'. */
1054 } else if (dpif_port
) {
1055 dpif_port
->name
= xstrdup(reply
.name
);
1056 dpif_port
->type
= xstrdup(get_vport_type(&reply
));
1057 dpif_port
->port_no
= reply
.port_no
;
1065 dpif_netlink_port_query_by_number(const struct dpif
*dpif_
, odp_port_t port_no
,
1066 struct dpif_port
*dpif_port
)
1068 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1070 return dpif_netlink_port_query__(dpif
, port_no
, NULL
, dpif_port
);
/* dpif 'port_query_by_name' callback: looks up the port named 'devname'
 * and, on success, fills in '*dpif_port'. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Query by name: port number 0 is ignored when a name is given. */
    return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
}
1083 dpif_netlink_port_get_pid__(const struct dpif_netlink
*dpif
,
1085 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
1087 uint32_t port_idx
= odp_to_u32(port_no
);
1090 if (dpif
->handlers
&& dpif
->uc_array_size
> 0) {
1091 /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1092 * channel, since it is not heavily loaded. */
1093 uint32_t idx
= port_idx
>= dpif
->uc_array_size
? 0 : port_idx
;
1095 /* Needs to check in case the socket pointer is changed in between
1096 * the holding of upcall_lock. A known case happens when the main
1097 * thread deletes the vport while the handler thread is handling
1098 * the upcall from that port. */
1099 if (dpif
->channels
[idx
].sock
) {
1100 pid
= nl_sock_pid(dpif
->channels
[idx
].sock
);
1108 dpif_netlink_port_get_pid(const struct dpif
*dpif_
, odp_port_t port_no
)
1110 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1113 fat_rwlock_rdlock(&dpif
->upcall_lock
);
1114 ret
= dpif_netlink_port_get_pid__(dpif
, port_no
);
1115 fat_rwlock_unlock(&dpif
->upcall_lock
);
1121 dpif_netlink_flow_flush(struct dpif
*dpif_
)
1123 const char *dpif_type_str
= dpif_normalize_type(dpif_type(dpif_
));
1124 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1125 struct dpif_netlink_flow flow
;
1127 dpif_netlink_flow_init(&flow
);
1128 flow
.cmd
= OVS_FLOW_CMD_DEL
;
1129 flow
.dp_ifindex
= dpif
->dp_ifindex
;
1131 if (netdev_is_flow_api_enabled()) {
1132 netdev_ports_flow_flush(dpif_type_str
);
1135 return dpif_netlink_flow_transact(&flow
, NULL
, NULL
);
1138 struct dpif_netlink_port_state
{
1139 struct nl_dump dump
;
1144 dpif_netlink_port_dump_start__(const struct dpif_netlink
*dpif
,
1145 struct nl_dump
*dump
)
1147 struct dpif_netlink_vport request
;
1150 dpif_netlink_vport_init(&request
);
1151 request
.cmd
= OVS_VPORT_CMD_GET
;
1152 request
.dp_ifindex
= dpif
->dp_ifindex
;
1154 buf
= ofpbuf_new(1024);
1155 dpif_netlink_vport_to_ofpbuf(&request
, buf
);
1156 nl_dump_start(dump
, NETLINK_GENERIC
, buf
);
1161 dpif_netlink_port_dump_start(const struct dpif
*dpif_
, void **statep
)
1163 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1164 struct dpif_netlink_port_state
*state
;
1166 *statep
= state
= xmalloc(sizeof *state
);
1167 dpif_netlink_port_dump_start__(dpif
, &state
->dump
);
1169 ofpbuf_init(&state
->buf
, NL_DUMP_BUFSIZE
);
1174 dpif_netlink_port_dump_next__(const struct dpif_netlink
*dpif
,
1175 struct nl_dump
*dump
,
1176 struct dpif_netlink_vport
*vport
,
1177 struct ofpbuf
*buffer
)
1182 if (!nl_dump_next(dump
, &buf
, buffer
)) {
1186 error
= dpif_netlink_vport_from_ofpbuf(vport
, &buf
);
1188 VLOG_WARN_RL(&error_rl
, "%s: failed to parse vport record (%s)",
1189 dpif_name(&dpif
->dpif
), ovs_strerror(error
));
1195 dpif_netlink_port_dump_next(const struct dpif
*dpif_
, void *state_
,
1196 struct dpif_port
*dpif_port
)
1198 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1199 struct dpif_netlink_port_state
*state
= state_
;
1200 struct dpif_netlink_vport vport
;
1203 error
= dpif_netlink_port_dump_next__(dpif
, &state
->dump
, &vport
,
1208 dpif_port
->name
= CONST_CAST(char *, vport
.name
);
1209 dpif_port
->type
= CONST_CAST(char *, get_vport_type(&vport
));
1210 dpif_port
->port_no
= vport
.port_no
;
1215 dpif_netlink_port_dump_done(const struct dpif
*dpif_ OVS_UNUSED
, void *state_
)
1217 struct dpif_netlink_port_state
*state
= state_
;
1218 int error
= nl_dump_done(&state
->dump
);
1220 ofpbuf_uninit(&state
->buf
);
1226 dpif_netlink_port_poll(const struct dpif
*dpif_
, char **devnamep
)
1228 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1230 /* Lazily create the Netlink socket to listen for notifications. */
1231 if (!dpif
->port_notifier
) {
1232 struct nl_sock
*sock
;
1235 error
= nl_sock_create(NETLINK_GENERIC
, &sock
);
1240 error
= nl_sock_join_mcgroup(sock
, ovs_vport_mcgroup
);
1242 nl_sock_destroy(sock
);
1245 dpif
->port_notifier
= sock
;
1247 /* We have no idea of the current state so report that everything
1253 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1254 uint64_t buf_stub
[4096 / 8];
1258 ofpbuf_use_stub(&buf
, buf_stub
, sizeof buf_stub
);
1259 error
= nl_sock_recv(dpif
->port_notifier
, &buf
, NULL
, false);
1261 struct dpif_netlink_vport vport
;
1263 error
= dpif_netlink_vport_from_ofpbuf(&vport
, &buf
);
1265 if (vport
.dp_ifindex
== dpif
->dp_ifindex
1266 && (vport
.cmd
== OVS_VPORT_CMD_NEW
1267 || vport
.cmd
== OVS_VPORT_CMD_DEL
1268 || vport
.cmd
== OVS_VPORT_CMD_SET
)) {
1269 VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8
,
1270 dpif
->dpif
.full_name
, vport
.name
, vport
.cmd
);
1271 if (vport
.cmd
== OVS_VPORT_CMD_DEL
&& dpif
->handlers
) {
1272 dpif
->refresh_channels
= true;
1274 *devnamep
= xstrdup(vport
.name
);
1275 ofpbuf_uninit(&buf
);
1279 } else if (error
!= EAGAIN
) {
1280 VLOG_WARN_RL(&rl
, "error reading or parsing netlink (%s)",
1281 ovs_strerror(error
));
1282 nl_sock_drain(dpif
->port_notifier
);
1286 ofpbuf_uninit(&buf
);
1294 dpif_netlink_port_poll_wait(const struct dpif
*dpif_
)
1296 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1298 if (dpif
->port_notifier
) {
1299 nl_sock_wait(dpif
->port_notifier
, POLLIN
);
1301 poll_immediate_wake();
1306 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow
*request
,
1307 const ovs_u128
*ufid
, bool terse
)
1310 request
->ufid
= *ufid
;
1311 request
->ufid_present
= true;
1313 request
->ufid_present
= false;
1315 request
->ufid_terse
= terse
;
1319 dpif_netlink_init_flow_get__(const struct dpif_netlink
*dpif
,
1320 const struct nlattr
*key
, size_t key_len
,
1321 const ovs_u128
*ufid
, bool terse
,
1322 struct dpif_netlink_flow
*request
)
1324 dpif_netlink_flow_init(request
);
1325 request
->cmd
= OVS_FLOW_CMD_GET
;
1326 request
->dp_ifindex
= dpif
->dp_ifindex
;
1328 request
->key_len
= key_len
;
1329 dpif_netlink_flow_init_ufid(request
, ufid
, terse
);
1333 dpif_netlink_init_flow_get(const struct dpif_netlink
*dpif
,
1334 const struct dpif_flow_get
*get
,
1335 struct dpif_netlink_flow
*request
)
1337 dpif_netlink_init_flow_get__(dpif
, get
->key
, get
->key_len
, get
->ufid
,
1342 dpif_netlink_flow_get__(const struct dpif_netlink
*dpif
,
1343 const struct nlattr
*key
, size_t key_len
,
1344 const ovs_u128
*ufid
, bool terse
,
1345 struct dpif_netlink_flow
*reply
, struct ofpbuf
**bufp
)
1347 struct dpif_netlink_flow request
;
1349 dpif_netlink_init_flow_get__(dpif
, key
, key_len
, ufid
, terse
, &request
);
1350 return dpif_netlink_flow_transact(&request
, reply
, bufp
);
1354 dpif_netlink_flow_get(const struct dpif_netlink
*dpif
,
1355 const struct dpif_netlink_flow
*flow
,
1356 struct dpif_netlink_flow
*reply
, struct ofpbuf
**bufp
)
1358 return dpif_netlink_flow_get__(dpif
, flow
->key
, flow
->key_len
,
1359 flow
->ufid_present
? &flow
->ufid
: NULL
,
1360 false, reply
, bufp
);
1364 dpif_netlink_init_flow_put(struct dpif_netlink
*dpif
,
1365 const struct dpif_flow_put
*put
,
1366 struct dpif_netlink_flow
*request
)
1368 static const struct nlattr dummy_action
;
1370 dpif_netlink_flow_init(request
);
1371 request
->cmd
= (put
->flags
& DPIF_FP_CREATE
1372 ? OVS_FLOW_CMD_NEW
: OVS_FLOW_CMD_SET
);
1373 request
->dp_ifindex
= dpif
->dp_ifindex
;
1374 request
->key
= put
->key
;
1375 request
->key_len
= put
->key_len
;
1376 request
->mask
= put
->mask
;
1377 request
->mask_len
= put
->mask_len
;
1378 dpif_netlink_flow_init_ufid(request
, put
->ufid
, false);
1380 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1381 request
->actions
= (put
->actions
1383 : CONST_CAST(struct nlattr
*, &dummy_action
));
1384 request
->actions_len
= put
->actions_len
;
1385 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
1386 request
->clear
= true;
1388 if (put
->flags
& DPIF_FP_PROBE
) {
1389 request
->probe
= true;
1391 request
->nlmsg_flags
= put
->flags
& DPIF_FP_MODIFY
? 0 : NLM_F_CREATE
;
1395 dpif_netlink_init_flow_del__(struct dpif_netlink
*dpif
,
1396 const struct nlattr
*key
, size_t key_len
,
1397 const ovs_u128
*ufid
, bool terse
,
1398 struct dpif_netlink_flow
*request
)
1400 dpif_netlink_flow_init(request
);
1401 request
->cmd
= OVS_FLOW_CMD_DEL
;
1402 request
->dp_ifindex
= dpif
->dp_ifindex
;
1404 request
->key_len
= key_len
;
1405 dpif_netlink_flow_init_ufid(request
, ufid
, terse
);
1409 dpif_netlink_init_flow_del(struct dpif_netlink
*dpif
,
1410 const struct dpif_flow_del
*del
,
1411 struct dpif_netlink_flow
*request
)
1413 dpif_netlink_init_flow_del__(dpif
, del
->key
, del
->key_len
,
1414 del
->ufid
, del
->terse
, request
);
1417 struct dpif_netlink_flow_dump
{
1418 struct dpif_flow_dump up
;
1419 struct nl_dump nl_dump
;
1421 struct netdev_flow_dump
**netdev_dumps
;
1422 int netdev_dumps_num
; /* Number of netdev_flow_dumps */
1423 struct ovs_mutex netdev_lock
; /* Guards the following. */
1424 int netdev_current_dump OVS_GUARDED
; /* Shared current dump */
1425 struct dpif_flow_dump_types types
; /* Type of dump */
1428 static struct dpif_netlink_flow_dump
*
1429 dpif_netlink_flow_dump_cast(struct dpif_flow_dump
*dump
)
1431 return CONTAINER_OF(dump
, struct dpif_netlink_flow_dump
, up
);
1435 start_netdev_dump(const struct dpif
*dpif_
,
1436 struct dpif_netlink_flow_dump
*dump
)
1438 ovs_mutex_init(&dump
->netdev_lock
);
1440 if (!(dump
->types
.netdev_flows
)) {
1441 dump
->netdev_dumps_num
= 0;
1442 dump
->netdev_dumps
= NULL
;
1446 ovs_mutex_lock(&dump
->netdev_lock
);
1447 dump
->netdev_current_dump
= 0;
1449 = netdev_ports_flow_dump_create(dpif_normalize_type(dpif_type(dpif_
)),
1450 &dump
->netdev_dumps_num
,
1452 ovs_mutex_unlock(&dump
->netdev_lock
);
1456 dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump
*dump
,
1457 struct dpif_flow_dump_types
*types
)
1460 dump
->types
.ovs_flows
= true;
1461 dump
->types
.netdev_flows
= true;
1463 memcpy(&dump
->types
, types
, sizeof *types
);
1467 static struct dpif_flow_dump
*
1468 dpif_netlink_flow_dump_create(const struct dpif
*dpif_
, bool terse
,
1469 struct dpif_flow_dump_types
*types
)
1471 const struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
1472 struct dpif_netlink_flow_dump
*dump
;
1473 struct dpif_netlink_flow request
;
1476 dump
= xmalloc(sizeof *dump
);
1477 dpif_flow_dump_init(&dump
->up
, dpif_
);
1479 dpif_netlink_populate_flow_dump_types(dump
, types
);
1481 if (dump
->types
.ovs_flows
) {
1482 dpif_netlink_flow_init(&request
);
1483 request
.cmd
= OVS_FLOW_CMD_GET
;
1484 request
.dp_ifindex
= dpif
->dp_ifindex
;
1485 request
.ufid_present
= false;
1486 request
.ufid_terse
= terse
;
1488 buf
= ofpbuf_new(1024);
1489 dpif_netlink_flow_to_ofpbuf(&request
, buf
);
1490 nl_dump_start(&dump
->nl_dump
, NETLINK_GENERIC
, buf
);
1493 atomic_init(&dump
->status
, 0);
1494 dump
->up
.terse
= terse
;
1496 start_netdev_dump(dpif_
, dump
);
1502 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
1504 struct dpif_netlink_flow_dump
*dump
= dpif_netlink_flow_dump_cast(dump_
);
1505 unsigned int nl_status
= 0;
1508 if (dump
->types
.ovs_flows
) {
1509 nl_status
= nl_dump_done(&dump
->nl_dump
);
1512 for (int i
= 0; i
< dump
->netdev_dumps_num
; i
++) {
1513 int err
= netdev_flow_dump_destroy(dump
->netdev_dumps
[i
]);
1515 if (err
!= 0 && err
!= EOPNOTSUPP
) {
1516 VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err
));
1520 free(dump
->netdev_dumps
);
1521 ovs_mutex_destroy(&dump
->netdev_lock
);
1523 /* No other thread has access to 'dump' at this point. */
1524 atomic_read_relaxed(&dump
->status
, &dump_status
);
1526 return dump_status
? dump_status
: nl_status
;
1529 struct dpif_netlink_flow_dump_thread
{
1530 struct dpif_flow_dump_thread up
;
1531 struct dpif_netlink_flow_dump
*dump
;
1532 struct dpif_netlink_flow flow
;
1533 struct dpif_flow_stats stats
;
1534 struct ofpbuf nl_flows
; /* Always used to store flows. */
1535 struct ofpbuf
*nl_actions
; /* Used if kernel does not supply actions. */
1536 int netdev_dump_idx
; /* This thread current netdev dump index */
1537 bool netdev_done
; /* If we are finished dumping netdevs */
1539 /* (Key/Mask/Actions) Buffers for netdev dumping */
1540 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
1541 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
1542 struct odputil_keybuf actbuf
[FLOW_DUMP_MAX_BATCH
];
1545 static struct dpif_netlink_flow_dump_thread
*
1546 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
1548 return CONTAINER_OF(thread
, struct dpif_netlink_flow_dump_thread
, up
);
1551 static struct dpif_flow_dump_thread
*
1552 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
1554 struct dpif_netlink_flow_dump
*dump
= dpif_netlink_flow_dump_cast(dump_
);
1555 struct dpif_netlink_flow_dump_thread
*thread
;
1557 thread
= xmalloc(sizeof *thread
);
1558 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
1559 thread
->dump
= dump
;
1560 ofpbuf_init(&thread
->nl_flows
, NL_DUMP_BUFSIZE
);
1561 thread
->nl_actions
= NULL
;
1562 thread
->netdev_dump_idx
= 0;
1563 thread
->netdev_done
= !(thread
->netdev_dump_idx
< dump
->netdev_dumps_num
);
1569 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
1571 struct dpif_netlink_flow_dump_thread
*thread
1572 = dpif_netlink_flow_dump_thread_cast(thread_
);
1574 ofpbuf_uninit(&thread
->nl_flows
);
1575 ofpbuf_delete(thread
->nl_actions
);
1580 dpif_netlink_flow_to_dpif_flow(struct dpif_flow
*dpif_flow
,
1581 const struct dpif_netlink_flow
*datapath_flow
)
1583 dpif_flow
->key
= datapath_flow
->key
;
1584 dpif_flow
->key_len
= datapath_flow
->key_len
;
1585 dpif_flow
->mask
= datapath_flow
->mask
;
1586 dpif_flow
->mask_len
= datapath_flow
->mask_len
;
1587 dpif_flow
->actions
= datapath_flow
->actions
;
1588 dpif_flow
->actions_len
= datapath_flow
->actions_len
;
1589 dpif_flow
->ufid_present
= datapath_flow
->ufid_present
;
1590 dpif_flow
->pmd_id
= PMD_ID_NULL
;
1591 if (datapath_flow
->ufid_present
) {
1592 dpif_flow
->ufid
= datapath_flow
->ufid
;
1594 ovs_assert(datapath_flow
->key
&& datapath_flow
->key_len
);
1595 odp_flow_key_hash(datapath_flow
->key
, datapath_flow
->key_len
,
1598 dpif_netlink_flow_get_stats(datapath_flow
, &dpif_flow
->stats
);
1599 dpif_flow
->attrs
.offloaded
= false;
1600 dpif_flow
->attrs
.dp_layer
= "ovs";
1601 dpif_flow
->attrs
.dp_extra_info
= NULL
;
1604 /* The design is such that all threads are working together on the first dump
1605 * to the last, in order (at first they all on dump 0).
1606 * When the first thread finds that the given dump is finished,
1607 * they all move to the next. If two or more threads find the same dump
1608 * is finished at the same time, the first one will advance the shared
1609 * netdev_current_dump and the others will catch up. */
1611 dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread
*thread
)
1613 struct dpif_netlink_flow_dump
*dump
= thread
->dump
;
1615 ovs_mutex_lock(&dump
->netdev_lock
);
1616 /* if we haven't finished (dumped everything) */
1617 if (dump
->netdev_current_dump
< dump
->netdev_dumps_num
) {
1618 /* if we are the first to find that current dump is finished
1620 if (thread
->netdev_dump_idx
== dump
->netdev_current_dump
) {
1621 thread
->netdev_dump_idx
= ++dump
->netdev_current_dump
;
1622 /* did we just finish the last dump? done. */
1623 if (dump
->netdev_current_dump
== dump
->netdev_dumps_num
) {
1624 thread
->netdev_done
= true;
1627 /* otherwise, we are behind, catch up */
1628 thread
->netdev_dump_idx
= dump
->netdev_current_dump
;
1631 /* some other thread finished */
1632 thread
->netdev_done
= true;
1634 ovs_mutex_unlock(&dump
->netdev_lock
);
1638 dpif_netlink_netdev_match_to_dpif_flow(struct match
*match
,
1639 struct ofpbuf
*key_buf
,
1640 struct ofpbuf
*mask_buf
,
1641 struct nlattr
*actions
,
1642 struct dpif_flow_stats
*stats
,
1643 struct dpif_flow_attrs
*attrs
,
1645 struct dpif_flow
*flow
,
1648 memset(flow
, 0, sizeof *flow
);
1651 struct odp_flow_key_parms odp_parms
= {
1652 .flow
= &match
->flow
,
1653 .mask
= &match
->wc
.masks
,
1655 .max_vlan_headers
= 2,
1666 offset
= key_buf
->size
;
1667 flow
->key
= ofpbuf_tail(key_buf
);
1668 odp_flow_key_from_flow(&odp_parms
, key_buf
);
1669 flow
->key_len
= key_buf
->size
- offset
;
1672 offset
= mask_buf
->size
;
1673 flow
->mask
= ofpbuf_tail(mask_buf
);
1674 odp_parms
.key_buf
= key_buf
;
1675 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
1676 flow
->mask_len
= mask_buf
->size
- offset
;
1679 flow
->actions
= nl_attr_get(actions
);
1680 flow
->actions_len
= nl_attr_get_size(actions
);
1684 memcpy(&flow
->stats
, stats
, sizeof *stats
);
1687 flow
->ufid_present
= true;
1690 flow
->pmd_id
= PMD_ID_NULL
;
1692 memcpy(&flow
->attrs
, attrs
, sizeof *attrs
);
1698 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
1699 struct dpif_flow
*flows
, int max_flows
)
1701 struct dpif_netlink_flow_dump_thread
*thread
1702 = dpif_netlink_flow_dump_thread_cast(thread_
);
1703 struct dpif_netlink_flow_dump
*dump
= thread
->dump
;
1704 struct dpif_netlink
*dpif
= dpif_netlink_cast(thread
->up
.dpif
);
1707 ofpbuf_delete(thread
->nl_actions
);
1708 thread
->nl_actions
= NULL
;
1711 max_flows
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
1713 while (!thread
->netdev_done
&& n_flows
< max_flows
) {
1714 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[n_flows
];
1715 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[n_flows
];
1716 struct odputil_keybuf
*actbuf
= &thread
->actbuf
[n_flows
];
1717 struct ofpbuf key
, mask
, act
;
1718 struct dpif_flow
*f
= &flows
[n_flows
];
1719 int cur
= thread
->netdev_dump_idx
;
1720 struct netdev_flow_dump
*netdev_dump
= dump
->netdev_dumps
[cur
];
1722 struct nlattr
*actions
;
1723 struct dpif_flow_stats stats
;
1724 struct dpif_flow_attrs attrs
;
1728 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
1729 ofpbuf_use_stack(&act
, actbuf
, sizeof *actbuf
);
1730 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
1731 has_next
= netdev_flow_dump_next(netdev_dump
, &match
,
1732 &actions
, &stats
, &attrs
,
1737 dpif_netlink_netdev_match_to_dpif_flow(&match
,
1747 dpif_netlink_advance_netdev_dump(thread
);
1751 if (!(dump
->types
.ovs_flows
)) {
1756 || (n_flows
< max_flows
&& thread
->nl_flows
.size
)) {
1757 struct dpif_netlink_flow datapath_flow
;
1758 struct ofpbuf nl_flow
;
1761 /* Try to grab another flow. */
1762 if (!nl_dump_next(&dump
->nl_dump
, &nl_flow
, &thread
->nl_flows
)) {
1766 /* Convert the flow to our output format. */
1767 error
= dpif_netlink_flow_from_ofpbuf(&datapath_flow
, &nl_flow
);
1769 atomic_store_relaxed(&dump
->status
, error
);
1773 if (dump
->up
.terse
|| datapath_flow
.actions
) {
1774 /* Common case: we don't want actions, or the flow includes
1776 dpif_netlink_flow_to_dpif_flow(&flows
[n_flows
++], &datapath_flow
);
1778 /* Rare case: the flow does not include actions. Retrieve this
1779 * individual flow again to get the actions. */
1780 error
= dpif_netlink_flow_get(dpif
, &datapath_flow
,
1781 &datapath_flow
, &thread
->nl_actions
);
1782 if (error
== ENOENT
) {
1783 VLOG_DBG("dumped flow disappeared on get");
1786 VLOG_WARN("error fetching dumped flow: %s",
1787 ovs_strerror(error
));
1788 atomic_store_relaxed(&dump
->status
, error
);
1792 /* Save this flow. Then exit, because we only have one buffer to
1793 * handle this case. */
1794 dpif_netlink_flow_to_dpif_flow(&flows
[n_flows
++], &datapath_flow
);
1802 dpif_netlink_encode_execute(int dp_ifindex
, const struct dpif_execute
*d_exec
,
1805 struct ovs_header
*k_exec
;
1808 ofpbuf_prealloc_tailroom(buf
, (64
1809 + dp_packet_size(d_exec
->packet
)
1810 + ODP_KEY_METADATA_SIZE
1811 + d_exec
->actions_len
));
1813 nl_msg_put_genlmsghdr(buf
, 0, ovs_packet_family
, NLM_F_REQUEST
,
1814 OVS_PACKET_CMD_EXECUTE
, OVS_PACKET_VERSION
);
1816 k_exec
= ofpbuf_put_uninit(buf
, sizeof *k_exec
);
1817 k_exec
->dp_ifindex
= dp_ifindex
;
1819 nl_msg_put_unspec(buf
, OVS_PACKET_ATTR_PACKET
,
1820 dp_packet_data(d_exec
->packet
),
1821 dp_packet_size(d_exec
->packet
));
1823 key_ofs
= nl_msg_start_nested(buf
, OVS_PACKET_ATTR_KEY
);
1824 odp_key_from_dp_packet(buf
, d_exec
->packet
);
1825 nl_msg_end_nested(buf
, key_ofs
);
1827 nl_msg_put_unspec(buf
, OVS_PACKET_ATTR_ACTIONS
,
1828 d_exec
->actions
, d_exec
->actions_len
);
1829 if (d_exec
->probe
) {
1830 nl_msg_put_flag(buf
, OVS_PACKET_ATTR_PROBE
);
1833 nl_msg_put_u16(buf
, OVS_PACKET_ATTR_MRU
, d_exec
->mtu
);
1837 nl_msg_put_u64(buf
, OVS_PACKET_ATTR_HASH
, d_exec
->hash
);
1841 /* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
1842 * Returns the number actually executed (at least 1, if 'n_ops' is
1845 dpif_netlink_operate__(struct dpif_netlink
*dpif
,
1846 struct dpif_op
**ops
, size_t n_ops
)
1849 struct nl_transaction txn
;
1851 struct ofpbuf request
;
1852 uint64_t request_stub
[1024 / 8];
1854 struct ofpbuf reply
;
1855 uint64_t reply_stub
[1024 / 8];
1856 } auxes
[OPERATE_MAX_OPS
];
1858 struct nl_transaction
*txnsp
[OPERATE_MAX_OPS
];
1861 n_ops
= MIN(n_ops
, OPERATE_MAX_OPS
);
1862 for (i
= 0; i
< n_ops
; i
++) {
1863 struct op_auxdata
*aux
= &auxes
[i
];
1864 struct dpif_op
*op
= ops
[i
];
1865 struct dpif_flow_put
*put
;
1866 struct dpif_flow_del
*del
;
1867 struct dpif_flow_get
*get
;
1868 struct dpif_netlink_flow flow
;
1870 ofpbuf_use_stub(&aux
->request
,
1871 aux
->request_stub
, sizeof aux
->request_stub
);
1872 aux
->txn
.request
= &aux
->request
;
1874 ofpbuf_use_stub(&aux
->reply
, aux
->reply_stub
, sizeof aux
->reply_stub
);
1875 aux
->txn
.reply
= NULL
;
1878 case DPIF_OP_FLOW_PUT
:
1879 put
= &op
->flow_put
;
1880 dpif_netlink_init_flow_put(dpif
, put
, &flow
);
1882 flow
.nlmsg_flags
|= NLM_F_ECHO
;
1883 aux
->txn
.reply
= &aux
->reply
;
1885 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1888 case DPIF_OP_FLOW_DEL
:
1889 del
= &op
->flow_del
;
1890 dpif_netlink_init_flow_del(dpif
, del
, &flow
);
1892 flow
.nlmsg_flags
|= NLM_F_ECHO
;
1893 aux
->txn
.reply
= &aux
->reply
;
1895 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1898 case DPIF_OP_EXECUTE
:
1899 /* Can't execute a packet that won't fit in a Netlink attribute. */
1900 if (OVS_UNLIKELY(nl_attr_oversized(
1901 dp_packet_size(op
->execute
.packet
)))) {
1902 /* Report an error immediately if this is the first operation.
1903 * Otherwise the easiest thing to do is to postpone to the next
1904 * call (when this will be the first operation). */
1906 VLOG_ERR_RL(&error_rl
,
1907 "dropping oversized %"PRIu32
"-byte packet",
1908 dp_packet_size(op
->execute
.packet
));
1909 op
->error
= ENOBUFS
;
1914 dpif_netlink_encode_execute(dpif
->dp_ifindex
, &op
->execute
,
1919 case DPIF_OP_FLOW_GET
:
1920 get
= &op
->flow_get
;
1921 dpif_netlink_init_flow_get(dpif
, get
, &flow
);
1922 aux
->txn
.reply
= get
->buffer
;
1923 dpif_netlink_flow_to_ofpbuf(&flow
, &aux
->request
);
1931 for (i
= 0; i
< n_ops
; i
++) {
1932 txnsp
[i
] = &auxes
[i
].txn
;
1934 nl_transact_multiple(NETLINK_GENERIC
, txnsp
, n_ops
);
1936 for (i
= 0; i
< n_ops
; i
++) {
1937 struct op_auxdata
*aux
= &auxes
[i
];
1938 struct nl_transaction
*txn
= &auxes
[i
].txn
;
1939 struct dpif_op
*op
= ops
[i
];
1940 struct dpif_flow_put
*put
;
1941 struct dpif_flow_del
*del
;
1942 struct dpif_flow_get
*get
;
1944 op
->error
= txn
->error
;
1947 case DPIF_OP_FLOW_PUT
:
1948 put
= &op
->flow_put
;
1951 struct dpif_netlink_flow reply
;
1953 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
,
1956 dpif_netlink_flow_get_stats(&reply
, put
->stats
);
1962 case DPIF_OP_FLOW_DEL
:
1963 del
= &op
->flow_del
;
1966 struct dpif_netlink_flow reply
;
1968 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
,
1971 dpif_netlink_flow_get_stats(&reply
, del
->stats
);
1977 case DPIF_OP_EXECUTE
:
1980 case DPIF_OP_FLOW_GET
:
1981 get
= &op
->flow_get
;
1983 struct dpif_netlink_flow reply
;
1985 op
->error
= dpif_netlink_flow_from_ofpbuf(&reply
, txn
->reply
);
1987 dpif_netlink_flow_to_dpif_flow(get
->flow
, &reply
);
1996 ofpbuf_uninit(&aux
->request
);
1997 ofpbuf_uninit(&aux
->reply
);
2004 parse_flow_get(struct dpif_netlink
*dpif
, struct dpif_flow_get
*get
)
2006 const char *dpif_type_str
= dpif_normalize_type(dpif_type(&dpif
->dpif
));
2007 struct dpif_flow
*dpif_flow
= get
->flow
;
2009 struct nlattr
*actions
;
2010 struct dpif_flow_stats stats
;
2011 struct dpif_flow_attrs attrs
;
2013 uint64_t act_buf
[1024 / 8];
2014 struct odputil_keybuf maskbuf
;
2015 struct odputil_keybuf keybuf
;
2016 struct odputil_keybuf actbuf
;
2017 struct ofpbuf key
, mask
, act
;
2020 ofpbuf_use_stack(&buf
, &act_buf
, sizeof act_buf
);
2021 err
= netdev_ports_flow_get(dpif_type_str
, &match
, &actions
, get
->ufid
,
2022 &stats
, &attrs
, &buf
);
2027 VLOG_DBG("found flow from netdev, translating to dpif flow");
2029 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
2030 ofpbuf_use_stack(&act
, &actbuf
, sizeof actbuf
);
2031 ofpbuf_use_stack(&mask
, &maskbuf
, sizeof maskbuf
);
2032 dpif_netlink_netdev_match_to_dpif_flow(&match
, &key
, &mask
, actions
,
2034 (ovs_u128
*) get
->ufid
,
2037 ofpbuf_put(get
->buffer
, nl_attr_get(actions
), nl_attr_get_size(actions
));
2038 dpif_flow
->actions
= ofpbuf_at(get
->buffer
, 0, 0);
2039 dpif_flow
->actions_len
= nl_attr_get_size(actions
);
2045 parse_flow_put(struct dpif_netlink
*dpif
, struct dpif_flow_put
*put
)
2047 const char *dpif_type_str
= dpif_normalize_type(dpif_type(&dpif
->dpif
));
2048 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
2051 const struct nlattr
*nla
;
2054 struct offload_info info
;
2055 ovs_be16 dst_port
= 0;
2056 uint8_t csum_on
= false;
2059 if (put
->flags
& DPIF_FP_PROBE
) {
2063 err
= parse_key_and_mask_to_match(put
->key
, put
->key_len
, put
->mask
,
2064 put
->mask_len
, &match
);
2069 in_port
= match
.flow
.in_port
.odp_port
;
2070 dev
= netdev_ports_get(in_port
, dpif_type_str
);
2075 /* Get tunnel dst port */
2076 NL_ATTR_FOR_EACH(nla
, left
, put
->actions
, put
->actions_len
) {
2077 if (nl_attr_type(nla
) == OVS_ACTION_ATTR_OUTPUT
) {
2078 const struct netdev_tunnel_config
*tnl_cfg
;
2079 struct netdev
*outdev
;
2080 odp_port_t out_port
;
2082 out_port
= nl_attr_get_odp_port(nla
);
2083 outdev
= netdev_ports_get(out_port
, dpif_type_str
);
2088 tnl_cfg
= netdev_get_tunnel_config(outdev
);
2089 if (tnl_cfg
&& tnl_cfg
->dst_port
!= 0) {
2090 dst_port
= tnl_cfg
->dst_port
;
2093 csum_on
= tnl_cfg
->csum
;
2095 netdev_close(outdev
);
2099 info
.tp_dst_port
= dst_port
;
2100 info
.tunnel_csum_on
= csum_on
;
2101 info
.recirc_id_shared_with_tc
= (dpif
->user_features
2102 & OVS_DP_F_TC_RECIRC_SHARING
);
2103 info
.tc_modify_flow_deleted
= false;
2104 err
= netdev_flow_put(dev
, &match
,
2105 CONST_CAST(struct nlattr
*, put
->actions
),
2107 CONST_CAST(ovs_u128
*, put
->ufid
),
2111 if (put
->flags
& DPIF_FP_MODIFY
) {
2112 struct dpif_op
*opp
;
2115 op
.type
= DPIF_OP_FLOW_DEL
;
2116 op
.flow_del
.key
= put
->key
;
2117 op
.flow_del
.key_len
= put
->key_len
;
2118 op
.flow_del
.ufid
= put
->ufid
;
2119 op
.flow_del
.pmd_id
= put
->pmd_id
;
2120 op
.flow_del
.stats
= NULL
;
2121 op
.flow_del
.terse
= false;
2124 dpif_netlink_operate__(dpif
, &opp
, 1);
2127 VLOG_DBG("added flow");
2128 } else if (err
!= EEXIST
) {
2129 struct netdev
*oor_netdev
= NULL
;
2130 enum vlog_level level
;
2131 if (err
== ENOSPC
&& netdev_is_offload_rebalance_policy_enabled()) {
2133 * We need to set OOR on the input netdev (i.e, 'dev') for the
2134 * flow. But if the flow has a tunnel attribute (i.e, decap action,
2135 * with a virtual device like a VxLAN interface as its in-port),
2136 * then lookup and set OOR on the underlying tunnel (real) netdev.
2138 oor_netdev
= flow_get_tunnel_netdev(&match
.flow
.tunnel
);
2140 /* Not a 'tunnel' flow */
2143 netdev_set_hw_info(oor_netdev
, HW_INFO_TYPE_OOR
, true);
2145 level
= (err
== ENOSPC
|| err
== EOPNOTSUPP
) ? VLL_DBG
: VLL_ERR
;
2146 VLOG_RL(&rl
, level
, "failed to offload flow: %s: %s",
2148 (oor_netdev
? oor_netdev
->name
: dev
->name
));
2152 if (err
&& err
!= EEXIST
&& (put
->flags
& DPIF_FP_MODIFY
)) {
2153 /* Modified rule can't be offloaded, try and delete from HW */
2156 if (!info
.tc_modify_flow_deleted
) {
2157 del_err
= netdev_flow_del(dev
, put
->ufid
, put
->stats
);
2161 /* Delete from hw success, so old flow was offloaded.
2162 * Change flags to create the flow in kernel */
2163 put
->flags
&= ~DPIF_FP_MODIFY
;
2164 put
->flags
|= DPIF_FP_CREATE
;
2165 } else if (del_err
!= ENOENT
) {
2166 VLOG_ERR_RL(&rl
, "failed to delete offloaded flow: %s",
2167 ovs_strerror(del_err
));
2168 /* Stop processing the flow in kernel. */
2179 try_send_to_netdev(struct dpif_netlink
*dpif
, struct dpif_op
*op
)
2181 int err
= EOPNOTSUPP
;
2184 case DPIF_OP_FLOW_PUT
: {
2185 struct dpif_flow_put
*put
= &op
->flow_put
;
2191 err
= parse_flow_put(dpif
, put
);
2192 log_flow_put_message(&dpif
->dpif
, &this_module
, put
, 0);
2195 case DPIF_OP_FLOW_DEL
: {
2196 struct dpif_flow_del
*del
= &op
->flow_del
;
2202 err
= netdev_ports_flow_del(
2203 dpif_normalize_type(dpif_type(&dpif
->dpif
)),
2206 log_flow_del_message(&dpif
->dpif
, &this_module
, del
, 0);
2209 case DPIF_OP_FLOW_GET
: {
2210 struct dpif_flow_get
*get
= &op
->flow_get
;
2212 if (!op
->flow_get
.ufid
) {
2216 err
= parse_flow_get(dpif
, get
);
2217 log_flow_get_message(&dpif
->dpif
, &this_module
, get
, 0);
2220 case DPIF_OP_EXECUTE
:
/* Runs all 'n_ops' operations against the kernel datapath, in chunks of at
 * most OPERATE_MAX_OPS as dictated by dpif_netlink_operate__(). */
static void
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
                            size_t n_ops)
{
    while (n_ops > 0) {
        size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops);

        ops += chunk;
        n_ops -= chunk;
    }
}
2241 dpif_netlink_operate(struct dpif
*dpif_
, struct dpif_op
**ops
, size_t n_ops
,
2242 enum dpif_offload_type offload_type
)
2244 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2245 struct dpif_op
*new_ops
[OPERATE_MAX_OPS
];
2250 if (offload_type
== DPIF_OFFLOAD_ALWAYS
&& !netdev_is_flow_api_enabled()) {
2251 VLOG_DBG("Invalid offload_type: %d", offload_type
);
2255 if (offload_type
!= DPIF_OFFLOAD_NEVER
&& netdev_is_flow_api_enabled()) {
2259 while (n_ops
> 0 && count
< OPERATE_MAX_OPS
) {
2260 struct dpif_op
*op
= ops
[i
++];
2262 err
= try_send_to_netdev(dpif
, op
);
2263 if (err
&& err
!= EEXIST
) {
2264 if (offload_type
== DPIF_OFFLOAD_ALWAYS
) {
2265 /* We got an error while offloading an op. Since
2266 * OFFLOAD_ALWAYS is specified, we stop further
2267 * processing and return to the caller without
2268 * invoking kernel datapath as fallback. But the
2269 * interface requires us to process all n_ops; so
2270 * return the same error in the remaining ops too.
2281 new_ops
[count
++] = op
;
2289 dpif_netlink_operate_chunks(dpif
, new_ops
, count
);
2291 } else if (offload_type
!= DPIF_OFFLOAD_ALWAYS
) {
2292 dpif_netlink_operate_chunks(dpif
, ops
, n_ops
);
2298 dpif_netlink_handler_uninit(struct dpif_handler
*handler
)
2300 vport_delete_sock_pool(handler
);
2304 dpif_netlink_handler_init(struct dpif_handler
*handler
)
2306 return vport_create_sock_pool(handler
);
2311 dpif_netlink_handler_init(struct dpif_handler
*handler
)
2313 handler
->epoll_fd
= epoll_create(10);
2314 return handler
->epoll_fd
< 0 ? errno
: 0;
2318 dpif_netlink_handler_uninit(struct dpif_handler
*handler
)
2320 close(handler
->epoll_fd
);
2324 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
2325 * currently in 'dpif' in the kernel, by adding a new set of channels for
2326 * any kernel vport that lacks one and deleting any channels that have no
2327 * backing kernel vports. */
2329 dpif_netlink_refresh_channels(struct dpif_netlink
*dpif
, uint32_t n_handlers
)
2330 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2332 unsigned long int *keep_channels
;
2333 struct dpif_netlink_vport vport
;
2334 size_t keep_channels_nbits
;
2335 struct nl_dump dump
;
2336 uint64_t reply_stub
[NL_DUMP_BUFSIZE
/ 8];
2341 ovs_assert(!WINDOWS
|| n_handlers
<= 1);
2342 ovs_assert(!WINDOWS
|| dpif
->n_handlers
<= 1);
2344 if (dpif
->n_handlers
!= n_handlers
) {
2345 destroy_all_channels(dpif
);
2346 dpif
->handlers
= xzalloc(n_handlers
* sizeof *dpif
->handlers
);
2347 for (i
= 0; i
< n_handlers
; i
++) {
2349 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
2351 error
= dpif_netlink_handler_init(handler
);
2355 for (j
= 0; j
< i
; j
++) {
2356 struct dpif_handler
*tmp
= &dpif
->handlers
[j
];
2357 dpif_netlink_handler_uninit(tmp
);
2359 free(dpif
->handlers
);
2360 dpif
->handlers
= NULL
;
2365 dpif
->n_handlers
= n_handlers
;
2368 for (i
= 0; i
< n_handlers
; i
++) {
2369 struct dpif_handler
*handler
= &dpif
->handlers
[i
];
2371 handler
->event_offset
= handler
->n_events
= 0;
2374 keep_channels_nbits
= dpif
->uc_array_size
;
2375 keep_channels
= bitmap_allocate(keep_channels_nbits
);
2377 ofpbuf_use_stub(&buf
, reply_stub
, sizeof reply_stub
);
2378 dpif_netlink_port_dump_start__(dpif
, &dump
);
2379 while (!dpif_netlink_port_dump_next__(dpif
, &dump
, &vport
, &buf
)) {
2380 uint32_t port_no
= odp_to_u32(vport
.port_no
);
2381 uint32_t upcall_pid
;
2384 if (port_no
>= dpif
->uc_array_size
2385 || !vport_get_pid(dpif
, port_no
, &upcall_pid
)) {
2386 struct nl_sock
*sock
;
2387 error
= create_nl_sock(dpif
, &sock
);
2393 error
= vport_add_channel(dpif
, vport
.port_no
, sock
);
2395 VLOG_INFO("%s: could not add channels for port %s",
2396 dpif_name(&dpif
->dpif
), vport
.name
);
2397 nl_sock_destroy(sock
);
2401 upcall_pid
= nl_sock_pid(sock
);
2404 /* Configure the vport to deliver misses to 'sock'. */
2405 if (vport
.upcall_pids
[0] == 0
2406 || vport
.n_upcall_pids
!= 1
2407 || upcall_pid
!= vport
.upcall_pids
[0]) {
2408 struct dpif_netlink_vport vport_request
;
2410 dpif_netlink_vport_init(&vport_request
);
2411 vport_request
.cmd
= OVS_VPORT_CMD_SET
;
2412 vport_request
.dp_ifindex
= dpif
->dp_ifindex
;
2413 vport_request
.port_no
= vport
.port_no
;
2414 vport_request
.n_upcall_pids
= 1;
2415 vport_request
.upcall_pids
= &upcall_pid
;
2416 error
= dpif_netlink_vport_transact(&vport_request
, NULL
, NULL
);
2418 VLOG_WARN_RL(&error_rl
,
2419 "%s: failed to set upcall pid on port: %s",
2420 dpif_name(&dpif
->dpif
), ovs_strerror(error
));
2422 if (error
!= ENODEV
&& error
!= ENOENT
) {
2425 /* The vport isn't really there, even though the dump says
2426 * it is. Probably we just hit a race after a port
2433 if (port_no
< keep_channels_nbits
) {
2434 bitmap_set1(keep_channels
, port_no
);
2439 vport_del_channels(dpif
, vport
.port_no
);
2441 nl_dump_done(&dump
);
2442 ofpbuf_uninit(&buf
);
2444 /* Discard any saved channels that we didn't reuse. */
2445 for (i
= 0; i
< keep_channels_nbits
; i
++) {
2446 if (!bitmap_is_set(keep_channels
, i
)) {
2447 vport_del_channels(dpif
, u32_to_odp(i
));
2450 free(keep_channels
);
2456 dpif_netlink_recv_set__(struct dpif_netlink
*dpif
, bool enable
)
2457 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2459 if ((dpif
->handlers
!= NULL
) == enable
) {
2461 } else if (!enable
) {
2462 destroy_all_channels(dpif
);
2465 return dpif_netlink_refresh_channels(dpif
, 1);
2470 dpif_netlink_recv_set(struct dpif
*dpif_
, bool enable
)
2472 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2475 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2476 error
= dpif_netlink_recv_set__(dpif
, enable
);
2477 fat_rwlock_unlock(&dpif
->upcall_lock
);
2483 dpif_netlink_handlers_set(struct dpif
*dpif_
, uint32_t n_handlers
)
2485 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2489 /* Multiple upcall handlers will be supported once kernel datapath supports
2491 if (n_handlers
> 1) {
2496 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2497 if (dpif
->handlers
) {
2498 error
= dpif_netlink_refresh_channels(dpif
, n_handlers
);
2500 fat_rwlock_unlock(&dpif
->upcall_lock
);
2506 dpif_netlink_queue_to_priority(const struct dpif
*dpif OVS_UNUSED
,
2507 uint32_t queue_id
, uint32_t *priority
)
2509 if (queue_id
< 0xf000) {
2510 *priority
= TC_H_MAKE(1 << 16, queue_id
+ 1);
2518 parse_odp_packet(struct ofpbuf
*buf
, struct dpif_upcall
*upcall
,
2521 static const struct nl_policy ovs_packet_policy
[] = {
2522 /* Always present. */
2523 [OVS_PACKET_ATTR_PACKET
] = { .type
= NL_A_UNSPEC
,
2524 .min_len
= ETH_HEADER_LEN
},
2525 [OVS_PACKET_ATTR_KEY
] = { .type
= NL_A_NESTED
},
2527 /* OVS_PACKET_CMD_ACTION only. */
2528 [OVS_PACKET_ATTR_USERDATA
] = { .type
= NL_A_UNSPEC
, .optional
= true },
2529 [OVS_PACKET_ATTR_EGRESS_TUN_KEY
] = { .type
= NL_A_NESTED
, .optional
= true },
2530 [OVS_PACKET_ATTR_ACTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
2531 [OVS_PACKET_ATTR_MRU
] = { .type
= NL_A_U16
, .optional
= true },
2532 [OVS_PACKET_ATTR_HASH
] = { .type
= NL_A_U64
, .optional
= true }
2535 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
2536 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
2537 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
2538 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
2540 struct nlattr
*a
[ARRAY_SIZE(ovs_packet_policy
)];
2541 if (!nlmsg
|| !genl
|| !ovs_header
2542 || nlmsg
->nlmsg_type
!= ovs_packet_family
2543 || !nl_policy_parse(&b
, 0, ovs_packet_policy
, a
,
2544 ARRAY_SIZE(ovs_packet_policy
))) {
2548 int type
= (genl
->cmd
== OVS_PACKET_CMD_MISS
? DPIF_UC_MISS
2549 : genl
->cmd
== OVS_PACKET_CMD_ACTION
? DPIF_UC_ACTION
2555 /* (Re)set ALL fields of '*upcall' on successful return. */
2556 upcall
->type
= type
;
2557 upcall
->key
= CONST_CAST(struct nlattr
*,
2558 nl_attr_get(a
[OVS_PACKET_ATTR_KEY
]));
2559 upcall
->key_len
= nl_attr_get_size(a
[OVS_PACKET_ATTR_KEY
]);
2560 odp_flow_key_hash(upcall
->key
, upcall
->key_len
, &upcall
->ufid
);
2561 upcall
->userdata
= a
[OVS_PACKET_ATTR_USERDATA
];
2562 upcall
->out_tun_key
= a
[OVS_PACKET_ATTR_EGRESS_TUN_KEY
];
2563 upcall
->actions
= a
[OVS_PACKET_ATTR_ACTIONS
];
2564 upcall
->mru
= a
[OVS_PACKET_ATTR_MRU
];
2565 upcall
->hash
= a
[OVS_PACKET_ATTR_HASH
];
2567 /* Allow overwriting the netlink attribute header without reallocating. */
2568 dp_packet_use_stub(&upcall
->packet
,
2569 CONST_CAST(struct nlattr
*,
2570 nl_attr_get(a
[OVS_PACKET_ATTR_PACKET
])) - 1,
2571 nl_attr_get_size(a
[OVS_PACKET_ATTR_PACKET
]) +
2572 sizeof(struct nlattr
));
2573 dp_packet_set_data(&upcall
->packet
,
2574 (char *)dp_packet_data(&upcall
->packet
) + sizeof(struct nlattr
));
2575 dp_packet_set_size(&upcall
->packet
, nl_attr_get_size(a
[OVS_PACKET_ATTR_PACKET
]));
2577 if (nl_attr_find__(upcall
->key
, upcall
->key_len
, OVS_KEY_ATTR_ETHERNET
)) {
2578 /* Ethernet frame */
2579 upcall
->packet
.packet_type
= htonl(PT_ETH
);
2581 /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
2582 ovs_be16 ethertype
= 0;
2583 const struct nlattr
*et_nla
= nl_attr_find__(upcall
->key
,
2585 OVS_KEY_ATTR_ETHERTYPE
);
2587 ethertype
= nl_attr_get_be16(et_nla
);
2589 upcall
->packet
.packet_type
= PACKET_TYPE_BE(OFPHTN_ETHERTYPE
,
2591 dp_packet_set_l3(&upcall
->packet
, dp_packet_data(&upcall
->packet
));
2594 *dp_ifindex
= ovs_header
->dp_ifindex
;
2600 #define PACKET_RECV_BATCH_SIZE 50
2602 dpif_netlink_recv_windows(struct dpif_netlink
*dpif
, uint32_t handler_id
,
2603 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2604 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2606 struct dpif_handler
*handler
;
2608 struct dpif_windows_vport_sock
*sock_pool
;
2611 if (!dpif
->handlers
) {
2615 /* Only one handler is supported currently. */
2616 if (handler_id
>= 1) {
2620 if (handler_id
>= dpif
->n_handlers
) {
2624 handler
= &dpif
->handlers
[handler_id
];
2625 sock_pool
= handler
->vport_sock_pool
;
2627 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
2632 if (++read_tries
> PACKET_RECV_BATCH_SIZE
) {
2636 error
= nl_sock_recv(sock_pool
[i
].nl_sock
, buf
, NULL
, false);
2637 if (error
== ENOBUFS
) {
2638 /* ENOBUFS typically means that we've received so many
2639 * packets that the buffer overflowed. Try again
2640 * immediately because there's almost certainly a packet
2641 * waiting for us. */
2642 /* XXX: report_loss(dpif, ch, idx, handler_id); */
2646 /* XXX: ch->last_poll = time_msec(); */
2648 if (error
== EAGAIN
) {
2654 error
= parse_odp_packet(buf
, upcall
, &dp_ifindex
);
2655 if (!error
&& dp_ifindex
== dpif
->dp_ifindex
) {
2667 dpif_netlink_recv__(struct dpif_netlink
*dpif
, uint32_t handler_id
,
2668 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2669 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2671 struct dpif_handler
*handler
;
2674 if (!dpif
->handlers
|| handler_id
>= dpif
->n_handlers
) {
2678 handler
= &dpif
->handlers
[handler_id
];
2679 if (handler
->event_offset
>= handler
->n_events
) {
2682 handler
->event_offset
= handler
->n_events
= 0;
2685 retval
= epoll_wait(handler
->epoll_fd
, handler
->epoll_events
,
2686 dpif
->uc_array_size
, 0);
2687 } while (retval
< 0 && errno
== EINTR
);
2690 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 1);
2691 VLOG_WARN_RL(&rl
, "epoll_wait failed (%s)", ovs_strerror(errno
));
2692 } else if (retval
> 0) {
2693 handler
->n_events
= retval
;
2697 while (handler
->event_offset
< handler
->n_events
) {
2698 int idx
= handler
->epoll_events
[handler
->event_offset
].data
.u32
;
2699 struct dpif_channel
*ch
= &dpif
->channels
[idx
];
2701 handler
->event_offset
++;
2707 if (++read_tries
> 50) {
2711 error
= nl_sock_recv(ch
->sock
, buf
, NULL
, false);
2712 if (error
== ENOBUFS
) {
2713 /* ENOBUFS typically means that we've received so many
2714 * packets that the buffer overflowed. Try again
2715 * immediately because there's almost certainly a packet
2716 * waiting for us. */
2717 report_loss(dpif
, ch
, idx
, handler_id
);
2721 ch
->last_poll
= time_msec();
2723 if (error
== EAGAIN
) {
2729 error
= parse_odp_packet(buf
, upcall
, &dp_ifindex
);
2730 if (!error
&& dp_ifindex
== dpif
->dp_ifindex
) {
2743 dpif_netlink_recv(struct dpif
*dpif_
, uint32_t handler_id
,
2744 struct dpif_upcall
*upcall
, struct ofpbuf
*buf
)
2746 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2749 fat_rwlock_rdlock(&dpif
->upcall_lock
);
2751 error
= dpif_netlink_recv_windows(dpif
, handler_id
, upcall
, buf
);
2753 error
= dpif_netlink_recv__(dpif
, handler_id
, upcall
, buf
);
2755 fat_rwlock_unlock(&dpif
->upcall_lock
);
2761 dpif_netlink_recv_wait__(struct dpif_netlink
*dpif
, uint32_t handler_id
)
2762 OVS_REQ_RDLOCK(dpif
->upcall_lock
)
2766 struct dpif_windows_vport_sock
*sock_pool
=
2767 dpif
->handlers
[handler_id
].vport_sock_pool
;
2769 /* Only one handler is supported currently. */
2770 if (handler_id
>= 1) {
2774 for (i
= 0; i
< VPORT_SOCK_POOL_SIZE
; i
++) {
2775 nl_sock_wait(sock_pool
[i
].nl_sock
, POLLIN
);
2778 if (dpif
->handlers
&& handler_id
< dpif
->n_handlers
) {
2779 struct dpif_handler
*handler
= &dpif
->handlers
[handler_id
];
2781 poll_fd_wait(handler
->epoll_fd
, POLLIN
);
2787 dpif_netlink_recv_wait(struct dpif
*dpif_
, uint32_t handler_id
)
2789 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2791 fat_rwlock_rdlock(&dpif
->upcall_lock
);
2792 dpif_netlink_recv_wait__(dpif
, handler_id
);
2793 fat_rwlock_unlock(&dpif
->upcall_lock
);
2797 dpif_netlink_recv_purge__(struct dpif_netlink
*dpif
)
2798 OVS_REQ_WRLOCK(dpif
->upcall_lock
)
2800 if (dpif
->handlers
) {
2803 if (!dpif
->channels
[0].sock
) {
2806 for (i
= 0; i
< dpif
->uc_array_size
; i
++ ) {
2808 nl_sock_drain(dpif
->channels
[i
].sock
);
2814 dpif_netlink_recv_purge(struct dpif
*dpif_
)
2816 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
2818 fat_rwlock_wrlock(&dpif
->upcall_lock
);
2819 dpif_netlink_recv_purge__(dpif
);
2820 fat_rwlock_unlock(&dpif
->upcall_lock
);
2824 dpif_netlink_get_datapath_version(void)
2826 char *version_str
= NULL
;
2830 #define MAX_VERSION_STR_SIZE 80
2831 #define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
2834 f
= fopen(LINUX_DATAPATH_VERSION_FILE
, "r");
2837 char version
[MAX_VERSION_STR_SIZE
];
2839 if (fgets(version
, MAX_VERSION_STR_SIZE
, f
)) {
2840 newline
= strchr(version
, '\n');
2844 version_str
= xstrdup(version
);
2853 struct dpif_netlink_ct_dump_state
{
2854 struct ct_dpif_dump_state up
;
2855 struct nl_ct_dump_state
*nl_ct_dump
;
2859 dpif_netlink_ct_dump_start(struct dpif
*dpif OVS_UNUSED
,
2860 struct ct_dpif_dump_state
**dump_
,
2861 const uint16_t *zone
, int *ptot_bkts
)
2863 struct dpif_netlink_ct_dump_state
*dump
;
2866 dump
= xzalloc(sizeof *dump
);
2867 err
= nl_ct_dump_start(&dump
->nl_ct_dump
, zone
, ptot_bkts
);
2879 dpif_netlink_ct_dump_next(struct dpif
*dpif OVS_UNUSED
,
2880 struct ct_dpif_dump_state
*dump_
,
2881 struct ct_dpif_entry
*entry
)
2883 struct dpif_netlink_ct_dump_state
*dump
;
2885 INIT_CONTAINER(dump
, dump_
, up
);
2887 return nl_ct_dump_next(dump
->nl_ct_dump
, entry
);
2891 dpif_netlink_ct_dump_done(struct dpif
*dpif OVS_UNUSED
,
2892 struct ct_dpif_dump_state
*dump_
)
2894 struct dpif_netlink_ct_dump_state
*dump
;
2896 INIT_CONTAINER(dump
, dump_
, up
);
2898 int err
= nl_ct_dump_done(dump
->nl_ct_dump
);
2904 dpif_netlink_ct_flush(struct dpif
*dpif OVS_UNUSED
, const uint16_t *zone
,
2905 const struct ct_dpif_tuple
*tuple
)
2908 return nl_ct_flush_tuple(tuple
, zone
? *zone
: 0);
2910 return nl_ct_flush_zone(*zone
);
2912 return nl_ct_flush();
2917 dpif_netlink_ct_set_limits(struct dpif
*dpif OVS_UNUSED
,
2918 const uint32_t *default_limits
,
2919 const struct ovs_list
*zone_limits
)
2921 struct ovs_zone_limit req_zone_limit
;
2923 if (ovs_ct_limit_family
< 0) {
2927 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
2928 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
2929 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_SET
,
2930 OVS_CT_LIMIT_VERSION
);
2932 struct ovs_header
*ovs_header
;
2933 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
2934 ovs_header
->dp_ifindex
= 0;
2937 opt_offset
= nl_msg_start_nested(request
, OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
2938 if (default_limits
) {
2939 req_zone_limit
.zone_id
= OVS_ZONE_LIMIT_DEFAULT_ZONE
;
2940 req_zone_limit
.limit
= *default_limits
;
2941 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2944 if (!ovs_list_is_empty(zone_limits
)) {
2945 struct ct_dpif_zone_limit
*zone_limit
;
2947 LIST_FOR_EACH (zone_limit
, node
, zone_limits
) {
2948 req_zone_limit
.zone_id
= zone_limit
->zone
;
2949 req_zone_limit
.limit
= zone_limit
->limit
;
2950 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
2953 nl_msg_end_nested(request
, opt_offset
);
2955 int err
= nl_transact(NETLINK_GENERIC
, request
, NULL
);
2956 ofpbuf_delete(request
);
2961 dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf
*buf
,
2962 uint32_t *default_limit
,
2963 struct ovs_list
*zone_limits
)
2965 static const struct nl_policy ovs_ct_limit_policy
[] = {
2966 [OVS_CT_LIMIT_ATTR_ZONE_LIMIT
] = { .type
= NL_A_NESTED
,
2970 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
2971 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
2972 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
2973 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
2975 struct nlattr
*attr
[ARRAY_SIZE(ovs_ct_limit_policy
)];
2977 if (!nlmsg
|| !genl
|| !ovs_header
2978 || nlmsg
->nlmsg_type
!= ovs_ct_limit_family
2979 || !nl_policy_parse(&b
, 0, ovs_ct_limit_policy
, attr
,
2980 ARRAY_SIZE(ovs_ct_limit_policy
))) {
2985 if (!attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]) {
2989 int rem
= NLA_ALIGN(
2990 nl_attr_get_size(attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]));
2991 const struct ovs_zone_limit
*zone_limit
=
2992 nl_attr_get(attr
[OVS_CT_LIMIT_ATTR_ZONE_LIMIT
]);
2994 while (rem
>= sizeof *zone_limit
) {
2995 if (zone_limit
->zone_id
== OVS_ZONE_LIMIT_DEFAULT_ZONE
) {
2996 *default_limit
= zone_limit
->limit
;
2997 } else if (zone_limit
->zone_id
< OVS_ZONE_LIMIT_DEFAULT_ZONE
||
2998 zone_limit
->zone_id
> UINT16_MAX
) {
3000 ct_dpif_push_zone_limit(zone_limits
, zone_limit
->zone_id
,
3001 zone_limit
->limit
, zone_limit
->count
);
3003 rem
-= NLA_ALIGN(sizeof *zone_limit
);
3004 zone_limit
= ALIGNED_CAST(struct ovs_zone_limit
*,
3005 (unsigned char *) zone_limit
+ NLA_ALIGN(sizeof *zone_limit
));
3011 dpif_netlink_ct_get_limits(struct dpif
*dpif OVS_UNUSED
,
3012 uint32_t *default_limit
,
3013 const struct ovs_list
*zone_limits_request
,
3014 struct ovs_list
*zone_limits_reply
)
3016 if (ovs_ct_limit_family
< 0) {
3020 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
3021 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
3022 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_GET
,
3023 OVS_CT_LIMIT_VERSION
);
3025 struct ovs_header
*ovs_header
;
3026 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
3027 ovs_header
->dp_ifindex
= 0;
3029 if (!ovs_list_is_empty(zone_limits_request
)) {
3030 size_t opt_offset
= nl_msg_start_nested(request
,
3031 OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
3033 struct ovs_zone_limit req_zone_limit
;
3034 req_zone_limit
.zone_id
= OVS_ZONE_LIMIT_DEFAULT_ZONE
;
3035 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
3037 struct ct_dpif_zone_limit
*zone_limit
;
3038 LIST_FOR_EACH (zone_limit
, node
, zone_limits_request
) {
3039 req_zone_limit
.zone_id
= zone_limit
->zone
;
3040 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
3043 nl_msg_end_nested(request
, opt_offset
);
3046 struct ofpbuf
*reply
;
3047 int err
= nl_transact(NETLINK_GENERIC
, request
, &reply
);
3052 err
= dpif_netlink_zone_limits_from_ofpbuf(reply
, default_limit
,
3056 ofpbuf_delete(request
);
3057 ofpbuf_delete(reply
);
3062 dpif_netlink_ct_del_limits(struct dpif
*dpif OVS_UNUSED
,
3063 const struct ovs_list
*zone_limits
)
3065 if (ovs_ct_limit_family
< 0) {
3069 struct ofpbuf
*request
= ofpbuf_new(NL_DUMP_BUFSIZE
);
3070 nl_msg_put_genlmsghdr(request
, 0, ovs_ct_limit_family
,
3071 NLM_F_REQUEST
| NLM_F_ECHO
, OVS_CT_LIMIT_CMD_DEL
,
3072 OVS_CT_LIMIT_VERSION
);
3074 struct ovs_header
*ovs_header
;
3075 ovs_header
= ofpbuf_put_uninit(request
, sizeof *ovs_header
);
3076 ovs_header
->dp_ifindex
= 0;
3078 if (!ovs_list_is_empty(zone_limits
)) {
3080 nl_msg_start_nested(request
, OVS_CT_LIMIT_ATTR_ZONE_LIMIT
);
3082 struct ct_dpif_zone_limit
*zone_limit
;
3083 LIST_FOR_EACH (zone_limit
, node
, zone_limits
) {
3084 struct ovs_zone_limit req_zone_limit
;
3085 req_zone_limit
.zone_id
= zone_limit
->zone
;
3086 nl_msg_put(request
, &req_zone_limit
, sizeof req_zone_limit
);
3088 nl_msg_end_nested(request
, opt_offset
);
3091 int err
= nl_transact(NETLINK_GENERIC
, request
, NULL
);
3093 ofpbuf_delete(request
);
3097 #define NL_TP_NAME_PREFIX "ovs_tp_"
3099 struct dpif_netlink_timeout_policy_protocol
{
3104 enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol
{
3105 DPIF_NL_TP_AF_INET_TCP
,
3106 DPIF_NL_TP_AF_INET_UDP
,
3107 DPIF_NL_TP_AF_INET_ICMP
,
3108 DPIF_NL_TP_AF_INET6_TCP
,
3109 DPIF_NL_TP_AF_INET6_UDP
,
3110 DPIF_NL_TP_AF_INET6_ICMPV6
,
3114 #define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)
3117 static struct dpif_netlink_timeout_policy_protocol tp_protos
[] = {
3118 [DPIF_NL_TP_AF_INET_TCP
] = { .l3num
= AF_INET
, .l4num
= IPPROTO_TCP
},
3119 [DPIF_NL_TP_AF_INET_UDP
] = { .l3num
= AF_INET
, .l4num
= IPPROTO_UDP
},
3120 [DPIF_NL_TP_AF_INET_ICMP
] = { .l3num
= AF_INET
, .l4num
= IPPROTO_ICMP
},
3121 [DPIF_NL_TP_AF_INET6_TCP
] = { .l3num
= AF_INET6
, .l4num
= IPPROTO_TCP
},
3122 [DPIF_NL_TP_AF_INET6_UDP
] = { .l3num
= AF_INET6
, .l4num
= IPPROTO_UDP
},
3123 [DPIF_NL_TP_AF_INET6_ICMPV6
] = { .l3num
= AF_INET6
,
3124 .l4num
= IPPROTO_ICMPV6
},
3128 dpif_netlink_format_tp_name(uint32_t id
, uint16_t l3num
, uint8_t l4num
,
3131 struct ds ds
= DS_EMPTY_INITIALIZER
;
3132 ds_put_format(&ds
, "%s%"PRIu32
"_", NL_TP_NAME_PREFIX
, id
);
3133 ct_dpif_format_ipproto(&ds
, l4num
);
3135 if (l3num
== AF_INET
) {
3136 ds_put_cstr(&ds
, "4");
3137 } else if (l3num
== AF_INET6
&& l4num
!= IPPROTO_ICMPV6
) {
3138 ds_put_cstr(&ds
, "6");
3141 ovs_assert(ds
.length
< CTNL_TIMEOUT_NAME_MAX
);
3143 *tp_name
= ds_steal_cstr(&ds
);
3147 dpif_netlink_ct_get_timeout_policy_name(struct dpif
*dpif OVS_UNUSED
,
3148 uint32_t tp_id
, uint16_t dl_type
,
3149 uint8_t nw_proto
, char **tp_name
,
3152 dpif_netlink_format_tp_name(tp_id
,
3153 dl_type
== ETH_TYPE_IP
? AF_INET
: AF_INET6
,
3155 *is_generic
= false;
3159 #define CT_DPIF_NL_TP_TCP_MAPPINGS \
3160 CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT) \
3161 CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV) \
3162 CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED) \
3163 CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT) \
3164 CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT) \
3165 CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK) \
3166 CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT) \
3167 CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE) \
3168 CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2) \
3169 CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS) \
3170 CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)
3172 #define CT_DPIF_NL_TP_UDP_MAPPINGS \
3173 CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED) \
3174 CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)
3176 #define CT_DPIF_NL_TP_ICMP_MAPPINGS \
3177 CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)
3179 #define CT_DPIF_NL_TP_ICMPV6_MAPPINGS \
3180 CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)
3183 #define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2) \
3184 if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) { \
3185 nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2; \
3186 nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] = \
3187 tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]; \
3191 dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy
*tp
,
3192 struct nl_ct_timeout_policy
*nl_tp
)
3194 CT_DPIF_NL_TP_TCP_MAPPINGS
3198 dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy
*tp
,
3199 struct nl_ct_timeout_policy
*nl_tp
)
3201 CT_DPIF_NL_TP_UDP_MAPPINGS
3205 dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy
*tp
,
3206 struct nl_ct_timeout_policy
*nl_tp
)
3208 CT_DPIF_NL_TP_ICMP_MAPPINGS
3212 dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy
*tp
,
3213 struct nl_ct_timeout_policy
*nl_tp
)
3215 CT_DPIF_NL_TP_ICMPV6_MAPPINGS
3218 #undef CT_DPIF_NL_TP_MAPPING
3221 dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy
*tp
,
3222 uint8_t l4num
, struct nl_ct_timeout_policy
*nl_tp
)
3226 if (l4num
== IPPROTO_TCP
) {
3227 dpif_netlink_get_nl_tp_tcp_attrs(tp
, nl_tp
);
3228 } else if (l4num
== IPPROTO_UDP
) {
3229 dpif_netlink_get_nl_tp_udp_attrs(tp
, nl_tp
);
3230 } else if (l4num
== IPPROTO_ICMP
) {
3231 dpif_netlink_get_nl_tp_icmp_attrs(tp
, nl_tp
);
3232 } else if (l4num
== IPPROTO_ICMPV6
) {
3233 dpif_netlink_get_nl_tp_icmpv6_attrs(tp
, nl_tp
);
3237 #define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2) \
3238 if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) { \
3239 if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) { \
3240 if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] != \
3241 nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) { \
3242 VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s " \
3243 "attribute %s=%"PRIu32" while %s=%"PRIu32, \
3244 nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2, \
3245 nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2], \
3246 "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1, \
3247 tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]); \
3250 tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1; \
3251 tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] = \
3252 nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]; \
3257 dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy
*nl_tp
,
3258 struct ct_dpif_timeout_policy
*tp
)
3260 CT_DPIF_NL_TP_TCP_MAPPINGS
3264 dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy
*nl_tp
,
3265 struct ct_dpif_timeout_policy
*tp
)
3267 CT_DPIF_NL_TP_UDP_MAPPINGS
3271 dpif_netlink_set_ct_dpif_tp_icmp_attrs(
3272 const struct nl_ct_timeout_policy
*nl_tp
,
3273 struct ct_dpif_timeout_policy
*tp
)
3275 CT_DPIF_NL_TP_ICMP_MAPPINGS
3279 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
3280 const struct nl_ct_timeout_policy
*nl_tp
,
3281 struct ct_dpif_timeout_policy
*tp
)
3283 CT_DPIF_NL_TP_ICMPV6_MAPPINGS
3286 #undef CT_DPIF_NL_TP_MAPPING
3289 dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy
*nl_tp
,
3290 struct ct_dpif_timeout_policy
*tp
)
3292 if (nl_tp
->l4num
== IPPROTO_TCP
) {
3293 dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp
, tp
);
3294 } else if (nl_tp
->l4num
== IPPROTO_UDP
) {
3295 dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp
, tp
);
3296 } else if (nl_tp
->l4num
== IPPROTO_ICMP
) {
3297 dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp
, tp
);
3298 } else if (nl_tp
->l4num
== IPPROTO_ICMPV6
) {
3299 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp
, tp
);
3305 dpif_netlink_ct_set_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3306 const struct ct_dpif_timeout_policy
*tp
)
3312 dpif_netlink_ct_get_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3314 struct ct_dpif_timeout_policy
*tp
)
3320 dpif_netlink_ct_del_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3327 dpif_netlink_ct_timeout_policy_dump_start(struct dpif
*dpif OVS_UNUSED
,
3334 dpif_netlink_ct_timeout_policy_dump_next(struct dpif
*dpif OVS_UNUSED
,
3336 struct ct_dpif_timeout_policy
**tp
)
3342 dpif_netlink_ct_timeout_policy_dump_done(struct dpif
*dpif OVS_UNUSED
,
3349 dpif_netlink_ct_set_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3350 const struct ct_dpif_timeout_policy
*tp
)
3354 for (int i
= 0; i
< ARRAY_SIZE(tp_protos
); ++i
) {
3355 struct nl_ct_timeout_policy nl_tp
;
3358 dpif_netlink_format_tp_name(tp
->id
, tp_protos
[i
].l3num
,
3359 tp_protos
[i
].l4num
, &nl_tp_name
);
3360 ovs_strlcpy(nl_tp
.name
, nl_tp_name
, sizeof nl_tp
.name
);
3363 nl_tp
.l3num
= tp_protos
[i
].l3num
;
3364 nl_tp
.l4num
= tp_protos
[i
].l4num
;
3365 dpif_netlink_get_nl_tp_attrs(tp
, tp_protos
[i
].l4num
, &nl_tp
);
3366 err
= nl_ct_set_timeout_policy(&nl_tp
);
3368 VLOG_WARN_RL(&error_rl
, "failed to add timeout policy %s (%s)",
3369 nl_tp
.name
, ovs_strerror(err
));
3379 dpif_netlink_ct_get_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3381 struct ct_dpif_timeout_policy
*tp
)
3387 for (int i
= 0; i
< ARRAY_SIZE(tp_protos
); ++i
) {
3388 struct nl_ct_timeout_policy nl_tp
;
3391 dpif_netlink_format_tp_name(tp_id
, tp_protos
[i
].l3num
,
3392 tp_protos
[i
].l4num
, &nl_tp_name
);
3393 err
= nl_ct_get_timeout_policy(nl_tp_name
, &nl_tp
);
3396 VLOG_WARN_RL(&error_rl
, "failed to get timeout policy %s (%s)",
3397 nl_tp_name
, ovs_strerror(err
));
3402 dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp
, tp
);
3409 /* Returns 0 if all the sub timeout policies are deleted or not exist in the
3410 * kernel. Returns 1 if any sub timeout policy deletion failed. */
3412 dpif_netlink_ct_del_timeout_policy(struct dpif
*dpif OVS_UNUSED
,
3417 for (int i
= 0; i
< ARRAY_SIZE(tp_protos
); ++i
) {
3419 dpif_netlink_format_tp_name(tp_id
, tp_protos
[i
].l3num
,
3420 tp_protos
[i
].l4num
, &nl_tp_name
);
3421 int err
= nl_ct_del_timeout_policy(nl_tp_name
);
3422 if (err
== ENOENT
) {
3426 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(6, 6);
3427 VLOG_INFO_RL(&rl
, "failed to delete timeout policy %s (%s)",
3428 nl_tp_name
, ovs_strerror(err
));
3437 struct dpif_netlink_ct_timeout_policy_dump_state
{
3438 struct nl_ct_timeout_policy_dump_state
*nl_dump_state
;
3439 struct hmap tp_dump_map
;
3442 struct dpif_netlink_tp_dump_node
{
3443 struct hmap_node hmap_node
; /* node in tp_dump_map. */
3444 struct ct_dpif_timeout_policy
*tp
;
3445 uint32_t l3_l4_present
;
3448 static struct dpif_netlink_tp_dump_node
*
3449 get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id
,
3450 struct hmap
*tp_dump_map
)
3452 struct dpif_netlink_tp_dump_node
*tp_dump_node
;
3454 HMAP_FOR_EACH_WITH_HASH (tp_dump_node
, hmap_node
, hash_int(tp_id
, 0),
3456 if (tp_dump_node
->tp
->id
== tp_id
) {
3457 return tp_dump_node
;
3464 update_dpif_netlink_tp_dump_node(
3465 const struct nl_ct_timeout_policy
*nl_tp
,
3466 struct dpif_netlink_tp_dump_node
*tp_dump_node
)
3468 dpif_netlink_set_ct_dpif_tp_attrs(nl_tp
, tp_dump_node
->tp
);
3469 for (int i
= 0; i
< DPIF_NL_TP_MAX
; ++i
) {
3470 if (nl_tp
->l3num
== tp_protos
[i
].l3num
&&
3471 nl_tp
->l4num
== tp_protos
[i
].l4num
) {
3472 tp_dump_node
->l3_l4_present
|= 1 << i
;
3479 dpif_netlink_ct_timeout_policy_dump_start(struct dpif
*dpif OVS_UNUSED
,
3482 struct dpif_netlink_ct_timeout_policy_dump_state
*dump_state
;
3484 *statep
= dump_state
= xzalloc(sizeof *dump_state
);
3485 int err
= nl_ct_timeout_policy_dump_start(&dump_state
->nl_dump_state
);
3490 hmap_init(&dump_state
->tp_dump_map
);
3495 get_and_cleanup_tp_dump_node(struct hmap
*hmap
,
3496 struct dpif_netlink_tp_dump_node
*tp_dump_node
,
3497 struct ct_dpif_timeout_policy
*tp
)
3499 hmap_remove(hmap
, &tp_dump_node
->hmap_node
);
3500 *tp
= *tp_dump_node
->tp
;
3501 free(tp_dump_node
->tp
);
3506 dpif_netlink_ct_timeout_policy_dump_next(struct dpif
*dpif OVS_UNUSED
,
3508 struct ct_dpif_timeout_policy
*tp
)
3510 struct dpif_netlink_ct_timeout_policy_dump_state
*dump_state
= state
;
3511 struct dpif_netlink_tp_dump_node
*tp_dump_node
;
3514 /* Dumps all the timeout policies in the kernel. */
3516 struct nl_ct_timeout_policy nl_tp
;
3519 err
= nl_ct_timeout_policy_dump_next(dump_state
->nl_dump_state
,
3525 /* We only interest in OVS installed timeout policies. */
3526 if (!ovs_scan(nl_tp
.name
, NL_TP_NAME_PREFIX
"%"PRIu32
, &tp_id
)) {
3530 tp_dump_node
= get_dpif_netlink_tp_dump_node_by_tp_id(
3531 tp_id
, &dump_state
->tp_dump_map
);
3532 if (!tp_dump_node
) {
3533 tp_dump_node
= xzalloc(sizeof *tp_dump_node
);
3534 tp_dump_node
->tp
= xzalloc(sizeof *tp_dump_node
->tp
);
3535 tp_dump_node
->tp
->id
= tp_id
;
3536 hmap_insert(&dump_state
->tp_dump_map
, &tp_dump_node
->hmap_node
,
3537 hash_int(tp_id
, 0));
3540 update_dpif_netlink_tp_dump_node(&nl_tp
, tp_dump_node
);
3542 /* Returns one ct_dpif_timeout_policy if we gather all the L3/L4
3544 if (tp_dump_node
->l3_l4_present
== DPIF_NL_ALL_TP
) {
3545 get_and_cleanup_tp_dump_node(&dump_state
->tp_dump_map
,
3551 /* Dump the incomplete timeout policies. */
3553 if (!hmap_is_empty(&dump_state
->tp_dump_map
)) {
3554 struct hmap_node
*hmap_node
= hmap_first(&dump_state
->tp_dump_map
);
3555 tp_dump_node
= CONTAINER_OF(hmap_node
,
3556 struct dpif_netlink_tp_dump_node
,
3558 get_and_cleanup_tp_dump_node(&dump_state
->tp_dump_map
,
3568 dpif_netlink_ct_timeout_policy_dump_done(struct dpif
*dpif OVS_UNUSED
,
3571 struct dpif_netlink_ct_timeout_policy_dump_state
*dump_state
= state
;
3572 struct dpif_netlink_tp_dump_node
*tp_dump_node
;
3574 int err
= nl_ct_timeout_policy_dump_done(dump_state
->nl_dump_state
);
3575 HMAP_FOR_EACH_POP (tp_dump_node
, hmap_node
, &dump_state
->tp_dump_map
) {
3576 free(tp_dump_node
->tp
);
3579 hmap_destroy(&dump_state
->tp_dump_map
);
3588 /* Set of supported meter flags */
3589 #define DP_SUPPORTED_METER_FLAGS_MASK \
3590 (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)
3592 /* Meter support was introduced in Linux 4.15. In some versions of
3593 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
3594 * when the meter was created, so all meters essentially had an id of
3595 * zero. Check for that condition and disable meters on those kernels. */
3596 static bool probe_broken_meters(struct dpif
*);
3599 dpif_netlink_meter_init(struct dpif_netlink
*dpif
, struct ofpbuf
*buf
,
3600 void *stub
, size_t size
, uint32_t command
)
3602 ofpbuf_use_stub(buf
, stub
, size
);
3604 nl_msg_put_genlmsghdr(buf
, 0, ovs_meter_family
, NLM_F_REQUEST
| NLM_F_ECHO
,
3605 command
, OVS_METER_VERSION
);
3607 struct ovs_header
*ovs_header
;
3608 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
3609 ovs_header
->dp_ifindex
= dpif
->dp_ifindex
;
3612 /* Execute meter 'request' in the kernel datapath. If the command
3613 * fails, returns a positive errno value. Otherwise, stores the reply
3614 * in '*replyp', parses the policy according to 'reply_policy' into the
3615 * array of Netlink attribute in 'a', and returns 0. On success, the
3616 * caller is responsible for calling ofpbuf_delete() on '*replyp'
3617 * ('replyp' will contain pointers into 'a'). */
3619 dpif_netlink_meter_transact(struct ofpbuf
*request
, struct ofpbuf
**replyp
,
3620 const struct nl_policy
*reply_policy
,
3621 struct nlattr
**a
, size_t size_a
)
3623 int error
= nl_transact(NETLINK_GENERIC
, request
, replyp
);
3624 ofpbuf_uninit(request
);
3630 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(*replyp
, sizeof *nlmsg
);
3631 struct genlmsghdr
*genl
= ofpbuf_try_pull(*replyp
, sizeof *genl
);
3632 struct ovs_header
*ovs_header
= ofpbuf_try_pull(*replyp
,
3633 sizeof *ovs_header
);
3634 if (!nlmsg
|| !genl
|| !ovs_header
3635 || nlmsg
->nlmsg_type
!= ovs_meter_family
3636 || !nl_policy_parse(*replyp
, 0, reply_policy
, a
, size_a
)) {
3637 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3639 "Kernel module response to meter tranaction is invalid");
3646 dpif_netlink_meter_get_features(const struct dpif
*dpif_
,
3647 struct ofputil_meter_features
*features
)
3649 if (probe_broken_meters(CONST_CAST(struct dpif
*, dpif_
))) {
3654 struct ofpbuf buf
, *msg
;
3655 uint64_t stub
[1024 / 8];
3657 static const struct nl_policy ovs_meter_features_policy
[] = {
3658 [OVS_METER_ATTR_MAX_METERS
] = { .type
= NL_A_U32
},
3659 [OVS_METER_ATTR_MAX_BANDS
] = { .type
= NL_A_U32
},
3660 [OVS_METER_ATTR_BANDS
] = { .type
= NL_A_NESTED
, .optional
= true },
3662 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_features_policy
)];
3664 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3665 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
,
3666 OVS_METER_CMD_FEATURES
);
3667 if (dpif_netlink_meter_transact(&buf
, &msg
, ovs_meter_features_policy
, a
,
3668 ARRAY_SIZE(ovs_meter_features_policy
))) {
3669 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3671 "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
3675 features
->max_meters
= nl_attr_get_u32(a
[OVS_METER_ATTR_MAX_METERS
]);
3676 features
->max_bands
= nl_attr_get_u32(a
[OVS_METER_ATTR_MAX_BANDS
]);
3678 /* Bands is a nested attribute of zero or more nested
3679 * band attributes. */
3680 if (a
[OVS_METER_ATTR_BANDS
]) {
3681 const struct nlattr
*nla
;
3684 NL_NESTED_FOR_EACH (nla
, left
, a
[OVS_METER_ATTR_BANDS
]) {
3685 const struct nlattr
*band_nla
;
3688 NL_NESTED_FOR_EACH (band_nla
, band_left
, nla
) {
3689 if (nl_attr_type(band_nla
) == OVS_BAND_ATTR_TYPE
) {
3690 if (nl_attr_get_size(band_nla
) == sizeof(uint32_t)) {
3691 switch (nl_attr_get_u32(band_nla
)) {
3692 case OVS_METER_BAND_TYPE_DROP
:
3693 features
->band_types
|= 1 << OFPMBT13_DROP
;
3701 features
->capabilities
= DP_SUPPORTED_METER_FLAGS_MASK
;
3707 dpif_netlink_meter_set__(struct dpif
*dpif_
, ofproto_meter_id meter_id
,
3708 struct ofputil_meter_config
*config
)
3710 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3711 struct ofpbuf buf
, *msg
;
3712 uint64_t stub
[1024 / 8];
3714 static const struct nl_policy ovs_meter_set_response_policy
[] = {
3715 [OVS_METER_ATTR_ID
] = { .type
= NL_A_U32
},
3717 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_set_response_policy
)];
3719 if (config
->flags
& ~DP_SUPPORTED_METER_FLAGS_MASK
) {
3720 return EBADF
; /* Unsupported flags set */
3723 for (size_t i
= 0; i
< config
->n_bands
; i
++) {
3724 switch (config
->bands
[i
].type
) {
3728 return ENODEV
; /* Unsupported band type */
3732 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
, OVS_METER_CMD_SET
);
3734 nl_msg_put_u32(&buf
, OVS_METER_ATTR_ID
, meter_id
.uint32
);
3736 if (config
->flags
& OFPMF13_KBPS
) {
3737 nl_msg_put_flag(&buf
, OVS_METER_ATTR_KBPS
);
3740 size_t bands_offset
= nl_msg_start_nested(&buf
, OVS_METER_ATTR_BANDS
);
3742 for (size_t i
= 0; i
< config
->n_bands
; ++i
) {
3743 struct ofputil_meter_band
* band
= &config
->bands
[i
];
3746 size_t band_offset
= nl_msg_start_nested(&buf
, OVS_BAND_ATTR_UNSPEC
);
3748 switch (band
->type
) {
3750 band_type
= OVS_METER_BAND_TYPE_DROP
;
3753 band_type
= OVS_METER_BAND_TYPE_UNSPEC
;
3755 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_TYPE
, band_type
);
3756 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_RATE
, band
->rate
);
3757 nl_msg_put_u32(&buf
, OVS_BAND_ATTR_BURST
,
3758 config
->flags
& OFPMF13_BURST
?
3759 band
->burst_size
: band
->rate
);
3760 nl_msg_end_nested(&buf
, band_offset
);
3762 nl_msg_end_nested(&buf
, bands_offset
);
3764 int error
= dpif_netlink_meter_transact(&buf
, &msg
,
3765 ovs_meter_set_response_policy
, a
,
3766 ARRAY_SIZE(ovs_meter_set_response_policy
));
3768 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3770 "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
3774 if (nl_attr_get_u32(a
[OVS_METER_ATTR_ID
]) != meter_id
.uint32
) {
3775 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3777 "Kernel returned a different meter id than requested");
3784 dpif_netlink_meter_set(struct dpif
*dpif_
, ofproto_meter_id meter_id
,
3785 struct ofputil_meter_config
*config
)
3787 if (probe_broken_meters(dpif_
)) {
3791 return dpif_netlink_meter_set__(dpif_
, meter_id
, config
);
3794 /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
3795 * stored in 'stats', if it is not null. If 'command' is
3796 * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
3797 * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
3798 * simply retrieved. */
3800 dpif_netlink_meter_get_stats(const struct dpif
*dpif_
,
3801 ofproto_meter_id meter_id
,
3802 struct ofputil_meter_stats
*stats
,
3804 enum ovs_meter_cmd command
)
3806 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
3807 struct ofpbuf buf
, *msg
;
3808 uint64_t stub
[1024 / 8];
3810 static const struct nl_policy ovs_meter_stats_policy
[] = {
3811 [OVS_METER_ATTR_ID
] = { .type
= NL_A_U32
, .optional
= true},
3812 [OVS_METER_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_flow_stats
),
3814 [OVS_METER_ATTR_BANDS
] = { .type
= NL_A_NESTED
, .optional
= true },
3816 struct nlattr
*a
[ARRAY_SIZE(ovs_meter_stats_policy
)];
3818 dpif_netlink_meter_init(dpif
, &buf
, stub
, sizeof stub
, command
);
3820 nl_msg_put_u32(&buf
, OVS_METER_ATTR_ID
, meter_id
.uint32
);
3822 int error
= dpif_netlink_meter_transact(&buf
, &msg
,
3823 ovs_meter_stats_policy
, a
,
3824 ARRAY_SIZE(ovs_meter_stats_policy
));
3826 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3827 VLOG_INFO_RL(&rl
, "dpif_netlink_meter_transact %s failed",
3828 command
== OVS_METER_CMD_GET
? "get" : "del");
3833 && a
[OVS_METER_ATTR_ID
]
3834 && a
[OVS_METER_ATTR_STATS
]
3835 && nl_attr_get_u32(a
[OVS_METER_ATTR_ID
]) == meter_id
.uint32
) {
3837 const struct ovs_flow_stats
*stat
;
3838 const struct nlattr
*nla
;
3841 stat
= nl_attr_get(a
[OVS_METER_ATTR_STATS
]);
3842 stats
->packet_in_count
= get_32aligned_u64(&stat
->n_packets
);
3843 stats
->byte_in_count
= get_32aligned_u64(&stat
->n_bytes
);
3845 if (a
[OVS_METER_ATTR_BANDS
]) {
3847 NL_NESTED_FOR_EACH (nla
, left
, a
[OVS_METER_ATTR_BANDS
]) {
3848 const struct nlattr
*band_nla
;
3849 band_nla
= nl_attr_find_nested(nla
, OVS_BAND_ATTR_STATS
);
3850 if (band_nla
&& nl_attr_get_size(band_nla
) \
3851 == sizeof(struct ovs_flow_stats
)) {
3852 stat
= nl_attr_get(band_nla
);
3854 if (n_bands
< max_bands
) {
3855 stats
->bands
[n_bands
].packet_count
3856 = get_32aligned_u64(&stat
->n_packets
);
3857 stats
->bands
[n_bands
].byte_count
3858 = get_32aligned_u64(&stat
->n_bytes
);
3862 stats
->bands
[n_bands
].packet_count
= 0;
3863 stats
->bands
[n_bands
].byte_count
= 0;
3867 stats
->n_bands
= n_bands
;
3869 /* For a non-existent meter, return 0 stats. */
3879 dpif_netlink_meter_get(const struct dpif
*dpif
, ofproto_meter_id meter_id
,
3880 struct ofputil_meter_stats
*stats
, uint16_t max_bands
)
3882 return dpif_netlink_meter_get_stats(dpif
, meter_id
, stats
, max_bands
,
3887 dpif_netlink_meter_del(struct dpif
*dpif
, ofproto_meter_id meter_id
,
3888 struct ofputil_meter_stats
*stats
, uint16_t max_bands
)
3890 return dpif_netlink_meter_get_stats(dpif
, meter_id
, stats
, max_bands
,
3895 probe_broken_meters__(struct dpif
*dpif
)
3897 /* This test is destructive if a probe occurs while ovs-vswitchd is
3898 * running (e.g., an ovs-dpctl meter command is called), so choose a
3899 * random high meter id to make this less likely to occur. */
3900 ofproto_meter_id id1
= { 54545401 };
3901 ofproto_meter_id id2
= { 54545402 };
3902 struct ofputil_meter_band band
= {OFPMBT13_DROP
, 0, 1, 0};
3903 struct ofputil_meter_config config1
= { 1, OFPMF13_KBPS
, 1, &band
};
3904 struct ofputil_meter_config config2
= { 2, OFPMF13_KBPS
, 1, &band
};
3906 /* Try adding two meters and make sure that they both come back with
3907 * the proper meter id. Use the "__" version so that we don't cause
3908 * a recurve deadlock. */
3909 dpif_netlink_meter_set__(dpif
, id1
, &config1
);
3910 dpif_netlink_meter_set__(dpif
, id2
, &config2
);
3912 if (dpif_netlink_meter_get(dpif
, id1
, NULL
, 0)
3913 || dpif_netlink_meter_get(dpif
, id2
, NULL
, 0)) {
3914 VLOG_INFO("The kernel module has a broken meter implementation.");
3918 dpif_netlink_meter_del(dpif
, id1
, NULL
, 0);
3919 dpif_netlink_meter_del(dpif
, id2
, NULL
, 0);
3925 probe_broken_meters(struct dpif
*dpif
)
3927 /* This is a once-only test because currently OVS only has at most a single
3928 * Netlink capable datapath on any given platform. */
3929 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
3931 static bool broken_meters
= false;
3932 if (ovsthread_once_start(&once
)) {
3933 broken_meters
= probe_broken_meters__(dpif
);
3934 ovsthread_once_done(&once
);
3936 return broken_meters
;
3939 const struct dpif_class dpif_netlink_class
= {
3941 false, /* cleanup_required */
3943 dpif_netlink_enumerate
,
3947 dpif_netlink_destroy
,
3950 dpif_netlink_get_stats
,
3951 dpif_netlink_set_features
,
3952 dpif_netlink_port_add
,
3953 dpif_netlink_port_del
,
3954 NULL
, /* port_set_config */
3955 dpif_netlink_port_query_by_number
,
3956 dpif_netlink_port_query_by_name
,
3957 dpif_netlink_port_get_pid
,
3958 dpif_netlink_port_dump_start
,
3959 dpif_netlink_port_dump_next
,
3960 dpif_netlink_port_dump_done
,
3961 dpif_netlink_port_poll
,
3962 dpif_netlink_port_poll_wait
,
3963 dpif_netlink_flow_flush
,
3964 dpif_netlink_flow_dump_create
,
3965 dpif_netlink_flow_dump_destroy
,
3966 dpif_netlink_flow_dump_thread_create
,
3967 dpif_netlink_flow_dump_thread_destroy
,
3968 dpif_netlink_flow_dump_next
,
3969 dpif_netlink_operate
,
3970 dpif_netlink_recv_set
,
3971 dpif_netlink_handlers_set
,
3972 NULL
, /* set_config */
3973 dpif_netlink_queue_to_priority
,
3975 dpif_netlink_recv_wait
,
3976 dpif_netlink_recv_purge
,
3977 NULL
, /* register_dp_purge_cb */
3978 NULL
, /* register_upcall_cb */
3979 NULL
, /* enable_upcall */
3980 NULL
, /* disable_upcall */
3981 dpif_netlink_get_datapath_version
, /* get_datapath_version */
3982 dpif_netlink_ct_dump_start
,
3983 dpif_netlink_ct_dump_next
,
3984 dpif_netlink_ct_dump_done
,
3985 dpif_netlink_ct_flush
,
3986 NULL
, /* ct_set_maxconns */
3987 NULL
, /* ct_get_maxconns */
3988 NULL
, /* ct_get_nconns */
3989 NULL
, /* ct_set_tcp_seq_chk */
3990 NULL
, /* ct_get_tcp_seq_chk */
3991 dpif_netlink_ct_set_limits
,
3992 dpif_netlink_ct_get_limits
,
3993 dpif_netlink_ct_del_limits
,
3994 dpif_netlink_ct_set_timeout_policy
,
3995 dpif_netlink_ct_get_timeout_policy
,
3996 dpif_netlink_ct_del_timeout_policy
,
3997 dpif_netlink_ct_timeout_policy_dump_start
,
3998 dpif_netlink_ct_timeout_policy_dump_next
,
3999 dpif_netlink_ct_timeout_policy_dump_done
,
4000 dpif_netlink_ct_get_timeout_policy_name
,
4001 NULL
, /* ipf_set_enabled */
4002 NULL
, /* ipf_set_min_frag */
4003 NULL
, /* ipf_set_max_nfrags */
4004 NULL
, /* ipf_get_status */
4005 NULL
, /* ipf_dump_start */
4006 NULL
, /* ipf_dump_next */
4007 NULL
, /* ipf_dump_done */
4008 dpif_netlink_meter_get_features
,
4009 dpif_netlink_meter_set
,
4010 dpif_netlink_meter_get
,
4011 dpif_netlink_meter_del
,
4012 NULL
, /* bond_add */
4013 NULL
, /* bond_del */
4014 NULL
, /* bond_stats_get */
4018 dpif_netlink_init(void)
4020 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
4023 if (ovsthread_once_start(&once
)) {
4024 error
= nl_lookup_genl_family(OVS_DATAPATH_FAMILY
,
4025 &ovs_datapath_family
);
4027 VLOG_INFO("Generic Netlink family '%s' does not exist. "
4028 "The Open vSwitch kernel module is probably not loaded.",
4029 OVS_DATAPATH_FAMILY
);
4032 error
= nl_lookup_genl_family(OVS_VPORT_FAMILY
, &ovs_vport_family
);
4035 error
= nl_lookup_genl_family(OVS_FLOW_FAMILY
, &ovs_flow_family
);
4038 error
= nl_lookup_genl_family(OVS_PACKET_FAMILY
,
4039 &ovs_packet_family
);
4042 error
= nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY
, OVS_VPORT_MCGROUP
,
4043 &ovs_vport_mcgroup
);
4046 if (nl_lookup_genl_family(OVS_METER_FAMILY
, &ovs_meter_family
)) {
4047 VLOG_INFO("The kernel module does not support meters.");
4050 if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY
,
4051 &ovs_ct_limit_family
) < 0) {
4052 VLOG_INFO("Generic Netlink family '%s' does not exist. "
4053 "Please update the Open vSwitch kernel module to enable "
4054 "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY
);
4057 ovs_tunnels_out_of_tree
= dpif_netlink_rtnl_probe_oot_tunnels();
4059 ovsthread_once_done(&once
);
4066 dpif_netlink_is_internal_device(const char *name
)
4068 struct dpif_netlink_vport reply
;
4072 error
= dpif_netlink_vport_get(name
, &reply
, &buf
);
4075 } else if (error
!= ENODEV
&& error
!= ENOENT
) {
4076 VLOG_WARN_RL(&error_rl
, "%s: vport query failed (%s)",
4077 name
, ovs_strerror(error
));
4080 return reply
.type
== OVS_VPORT_TYPE_INTERNAL
;
4083 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
4084 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
4085 * positive errno value.
4087 * 'vport' will contain pointers into 'buf', so the caller should not free
4088 * 'buf' while 'vport' is still in use. */
4090 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport
*vport
,
4091 const struct ofpbuf
*buf
)
4093 static const struct nl_policy ovs_vport_policy
[] = {
4094 [OVS_VPORT_ATTR_PORT_NO
] = { .type
= NL_A_U32
},
4095 [OVS_VPORT_ATTR_TYPE
] = { .type
= NL_A_U32
},
4096 [OVS_VPORT_ATTR_NAME
] = { .type
= NL_A_STRING
, .max_len
= IFNAMSIZ
},
4097 [OVS_VPORT_ATTR_UPCALL_PID
] = { .type
= NL_A_UNSPEC
},
4098 [OVS_VPORT_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_vport_stats
),
4100 [OVS_VPORT_ATTR_OPTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
4101 [OVS_VPORT_ATTR_NETNSID
] = { .type
= NL_A_U32
, .optional
= true },
4104 dpif_netlink_vport_init(vport
);
4106 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
4107 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
4108 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
4109 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
4111 struct nlattr
*a
[ARRAY_SIZE(ovs_vport_policy
)];
4112 if (!nlmsg
|| !genl
|| !ovs_header
4113 || nlmsg
->nlmsg_type
!= ovs_vport_family
4114 || !nl_policy_parse(&b
, 0, ovs_vport_policy
, a
,
4115 ARRAY_SIZE(ovs_vport_policy
))) {
4119 vport
->cmd
= genl
->cmd
;
4120 vport
->dp_ifindex
= ovs_header
->dp_ifindex
;
4121 vport
->port_no
= nl_attr_get_odp_port(a
[OVS_VPORT_ATTR_PORT_NO
]);
4122 vport
->type
= nl_attr_get_u32(a
[OVS_VPORT_ATTR_TYPE
]);
4123 vport
->name
= nl_attr_get_string(a
[OVS_VPORT_ATTR_NAME
]);
4124 if (a
[OVS_VPORT_ATTR_UPCALL_PID
]) {
4125 vport
->n_upcall_pids
= nl_attr_get_size(a
[OVS_VPORT_ATTR_UPCALL_PID
])
4126 / (sizeof *vport
->upcall_pids
);
4127 vport
->upcall_pids
= nl_attr_get(a
[OVS_VPORT_ATTR_UPCALL_PID
]);
4130 if (a
[OVS_VPORT_ATTR_STATS
]) {
4131 vport
->stats
= nl_attr_get(a
[OVS_VPORT_ATTR_STATS
]);
4133 if (a
[OVS_VPORT_ATTR_OPTIONS
]) {
4134 vport
->options
= nl_attr_get(a
[OVS_VPORT_ATTR_OPTIONS
]);
4135 vport
->options_len
= nl_attr_get_size(a
[OVS_VPORT_ATTR_OPTIONS
]);
4137 if (a
[OVS_VPORT_ATTR_NETNSID
]) {
4138 netnsid_set(&vport
->netnsid
,
4139 nl_attr_get_u32(a
[OVS_VPORT_ATTR_NETNSID
]));
4141 netnsid_set_local(&vport
->netnsid
);
4146 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
4147 * followed by Netlink attributes corresponding to 'vport'. */
4149 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport
*vport
,
4152 struct ovs_header
*ovs_header
;
4154 nl_msg_put_genlmsghdr(buf
, 0, ovs_vport_family
, NLM_F_REQUEST
| NLM_F_ECHO
,
4155 vport
->cmd
, OVS_VPORT_VERSION
);
4157 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
4158 ovs_header
->dp_ifindex
= vport
->dp_ifindex
;
4160 if (vport
->port_no
!= ODPP_NONE
) {
4161 nl_msg_put_odp_port(buf
, OVS_VPORT_ATTR_PORT_NO
, vport
->port_no
);
4164 if (vport
->type
!= OVS_VPORT_TYPE_UNSPEC
) {
4165 nl_msg_put_u32(buf
, OVS_VPORT_ATTR_TYPE
, vport
->type
);
4169 nl_msg_put_string(buf
, OVS_VPORT_ATTR_NAME
, vport
->name
);
4172 if (vport
->upcall_pids
) {
4173 nl_msg_put_unspec(buf
, OVS_VPORT_ATTR_UPCALL_PID
,
4175 vport
->n_upcall_pids
* sizeof *vport
->upcall_pids
);
4179 nl_msg_put_unspec(buf
, OVS_VPORT_ATTR_STATS
,
4180 vport
->stats
, sizeof *vport
->stats
);
4183 if (vport
->options
) {
4184 nl_msg_put_nested(buf
, OVS_VPORT_ATTR_OPTIONS
,
4185 vport
->options
, vport
->options_len
);
4189 /* Clears 'vport' to "empty" values. */
4191 dpif_netlink_vport_init(struct dpif_netlink_vport
*vport
)
4193 memset(vport
, 0, sizeof *vport
);
4194 vport
->port_no
= ODPP_NONE
;
4197 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4198 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4199 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4200 * result of the command is expected to be an ovs_vport also, which is decoded
4201 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
4202 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
4204 dpif_netlink_vport_transact(const struct dpif_netlink_vport
*request
,
4205 struct dpif_netlink_vport
*reply
,
4206 struct ofpbuf
**bufp
)
4208 struct ofpbuf
*request_buf
;
4211 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
4213 error
= dpif_netlink_init();
4217 dpif_netlink_vport_init(reply
);
4222 request_buf
= ofpbuf_new(1024);
4223 dpif_netlink_vport_to_ofpbuf(request
, request_buf
);
4224 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
4225 ofpbuf_delete(request_buf
);
4229 error
= dpif_netlink_vport_from_ofpbuf(reply
, *bufp
);
4232 dpif_netlink_vport_init(reply
);
4233 ofpbuf_delete(*bufp
);
4240 /* Obtains information about the kernel vport named 'name' and stores it into
4241 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
4242 * longer needed ('reply' will contain pointers into '*bufp'). */
4244 dpif_netlink_vport_get(const char *name
, struct dpif_netlink_vport
*reply
,
4245 struct ofpbuf
**bufp
)
4247 struct dpif_netlink_vport request
;
4249 dpif_netlink_vport_init(&request
);
4250 request
.cmd
= OVS_VPORT_CMD_GET
;
4251 request
.name
= name
;
4253 return dpif_netlink_vport_transact(&request
, reply
, bufp
);
4256 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
4257 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
4258 * positive errno value.
4260 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
4261 * while 'dp' is still in use. */
4263 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp
*dp
, const struct ofpbuf
*buf
)
4265 static const struct nl_policy ovs_datapath_policy
[] = {
4266 [OVS_DP_ATTR_NAME
] = { .type
= NL_A_STRING
, .max_len
= IFNAMSIZ
},
4267 [OVS_DP_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_dp_stats
),
4269 [OVS_DP_ATTR_MEGAFLOW_STATS
] = {
4270 NL_POLICY_FOR(struct ovs_dp_megaflow_stats
),
4272 [OVS_DP_ATTR_USER_FEATURES
] = {
4277 dpif_netlink_dp_init(dp
);
4279 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
4280 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
4281 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
4282 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
4284 struct nlattr
*a
[ARRAY_SIZE(ovs_datapath_policy
)];
4285 if (!nlmsg
|| !genl
|| !ovs_header
4286 || nlmsg
->nlmsg_type
!= ovs_datapath_family
4287 || !nl_policy_parse(&b
, 0, ovs_datapath_policy
, a
,
4288 ARRAY_SIZE(ovs_datapath_policy
))) {
4292 dp
->cmd
= genl
->cmd
;
4293 dp
->dp_ifindex
= ovs_header
->dp_ifindex
;
4294 dp
->name
= nl_attr_get_string(a
[OVS_DP_ATTR_NAME
]);
4295 if (a
[OVS_DP_ATTR_STATS
]) {
4296 dp
->stats
= nl_attr_get(a
[OVS_DP_ATTR_STATS
]);
4299 if (a
[OVS_DP_ATTR_MEGAFLOW_STATS
]) {
4300 dp
->megaflow_stats
= nl_attr_get(a
[OVS_DP_ATTR_MEGAFLOW_STATS
]);
4303 if (a
[OVS_DP_ATTR_USER_FEATURES
]) {
4304 dp
->user_features
= nl_attr_get_u32(a
[OVS_DP_ATTR_USER_FEATURES
]);
4310 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
4312 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp
*dp
, struct ofpbuf
*buf
)
4314 struct ovs_header
*ovs_header
;
4316 nl_msg_put_genlmsghdr(buf
, 0, ovs_datapath_family
,
4317 NLM_F_REQUEST
| NLM_F_ECHO
, dp
->cmd
,
4318 OVS_DATAPATH_VERSION
);
4320 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
4321 ovs_header
->dp_ifindex
= dp
->dp_ifindex
;
4324 nl_msg_put_string(buf
, OVS_DP_ATTR_NAME
, dp
->name
);
4327 if (dp
->upcall_pid
) {
4328 nl_msg_put_u32(buf
, OVS_DP_ATTR_UPCALL_PID
, *dp
->upcall_pid
);
4331 if (dp
->user_features
) {
4332 nl_msg_put_u32(buf
, OVS_DP_ATTR_USER_FEATURES
, dp
->user_features
);
4335 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
4338 /* Clears 'dp' to "empty" values. */
4340 dpif_netlink_dp_init(struct dpif_netlink_dp
*dp
)
4342 memset(dp
, 0, sizeof *dp
);
/* Begins a Netlink dump of every datapath in the kernel; iterate the
 * results with nl_dump_next() on 'dump'. */
static void
dpif_netlink_dp_dump_start(struct nl_dump *dump)
{
    struct dpif_netlink_dp request;
    struct ofpbuf *buf;

    /* An empty OVS_DP_CMD_GET request enumerates all datapaths. */
    dpif_netlink_dp_init(&request);
    request.cmd = OVS_DP_CMD_GET;

    buf = ofpbuf_new(1024);
    dpif_netlink_dp_to_ofpbuf(&request, buf);
    nl_dump_start(dump, NETLINK_GENERIC, buf);
    ofpbuf_delete(buf);
}
4360 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4361 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4362 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4363 * result of the command is expected to be of the same form, which is decoded
4364 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
4365 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
4367 dpif_netlink_dp_transact(const struct dpif_netlink_dp
*request
,
4368 struct dpif_netlink_dp
*reply
, struct ofpbuf
**bufp
)
4370 struct ofpbuf
*request_buf
;
4373 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
4375 request_buf
= ofpbuf_new(1024);
4376 dpif_netlink_dp_to_ofpbuf(request
, request_buf
);
4377 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
4378 ofpbuf_delete(request_buf
);
4381 dpif_netlink_dp_init(reply
);
4383 error
= dpif_netlink_dp_from_ofpbuf(reply
, *bufp
);
4386 ofpbuf_delete(*bufp
);
4393 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
4394 * The caller must free '*bufp' when the reply is no longer needed ('reply'
4395 * will contain pointers into '*bufp'). */
4397 dpif_netlink_dp_get(const struct dpif
*dpif_
, struct dpif_netlink_dp
*reply
,
4398 struct ofpbuf
**bufp
)
4400 struct dpif_netlink
*dpif
= dpif_netlink_cast(dpif_
);
4401 struct dpif_netlink_dp request
;
4403 dpif_netlink_dp_init(&request
);
4404 request
.cmd
= OVS_DP_CMD_GET
;
4405 request
.dp_ifindex
= dpif
->dp_ifindex
;
4407 return dpif_netlink_dp_transact(&request
, reply
, bufp
);
4410 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
4411 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
4412 * positive errno value.
4414 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
4415 * while 'flow' is still in use. */
4417 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow
*flow
,
4418 const struct ofpbuf
*buf
)
4420 static const struct nl_policy ovs_flow_policy
[__OVS_FLOW_ATTR_MAX
] = {
4421 [OVS_FLOW_ATTR_KEY
] = { .type
= NL_A_NESTED
, .optional
= true },
4422 [OVS_FLOW_ATTR_MASK
] = { .type
= NL_A_NESTED
, .optional
= true },
4423 [OVS_FLOW_ATTR_ACTIONS
] = { .type
= NL_A_NESTED
, .optional
= true },
4424 [OVS_FLOW_ATTR_STATS
] = { NL_POLICY_FOR(struct ovs_flow_stats
),
4426 [OVS_FLOW_ATTR_TCP_FLAGS
] = { .type
= NL_A_U8
, .optional
= true },
4427 [OVS_FLOW_ATTR_USED
] = { .type
= NL_A_U64
, .optional
= true },
4428 [OVS_FLOW_ATTR_UFID
] = { .type
= NL_A_U128
, .optional
= true },
4429 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
4430 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
4431 /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
4434 dpif_netlink_flow_init(flow
);
4436 struct ofpbuf b
= ofpbuf_const_initializer(buf
->data
, buf
->size
);
4437 struct nlmsghdr
*nlmsg
= ofpbuf_try_pull(&b
, sizeof *nlmsg
);
4438 struct genlmsghdr
*genl
= ofpbuf_try_pull(&b
, sizeof *genl
);
4439 struct ovs_header
*ovs_header
= ofpbuf_try_pull(&b
, sizeof *ovs_header
);
4441 struct nlattr
*a
[ARRAY_SIZE(ovs_flow_policy
)];
4442 if (!nlmsg
|| !genl
|| !ovs_header
4443 || nlmsg
->nlmsg_type
!= ovs_flow_family
4444 || !nl_policy_parse(&b
, 0, ovs_flow_policy
, a
,
4445 ARRAY_SIZE(ovs_flow_policy
))) {
4448 if (!a
[OVS_FLOW_ATTR_KEY
] && !a
[OVS_FLOW_ATTR_UFID
]) {
4452 flow
->nlmsg_flags
= nlmsg
->nlmsg_flags
;
4453 flow
->dp_ifindex
= ovs_header
->dp_ifindex
;
4454 if (a
[OVS_FLOW_ATTR_KEY
]) {
4455 flow
->key
= nl_attr_get(a
[OVS_FLOW_ATTR_KEY
]);
4456 flow
->key_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_KEY
]);
4459 if (a
[OVS_FLOW_ATTR_UFID
]) {
4460 flow
->ufid
= nl_attr_get_u128(a
[OVS_FLOW_ATTR_UFID
]);
4461 flow
->ufid_present
= true;
4463 if (a
[OVS_FLOW_ATTR_MASK
]) {
4464 flow
->mask
= nl_attr_get(a
[OVS_FLOW_ATTR_MASK
]);
4465 flow
->mask_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_MASK
]);
4467 if (a
[OVS_FLOW_ATTR_ACTIONS
]) {
4468 flow
->actions
= nl_attr_get(a
[OVS_FLOW_ATTR_ACTIONS
]);
4469 flow
->actions_len
= nl_attr_get_size(a
[OVS_FLOW_ATTR_ACTIONS
]);
4471 if (a
[OVS_FLOW_ATTR_STATS
]) {
4472 flow
->stats
= nl_attr_get(a
[OVS_FLOW_ATTR_STATS
]);
4474 if (a
[OVS_FLOW_ATTR_TCP_FLAGS
]) {
4475 flow
->tcp_flags
= nl_attr_get(a
[OVS_FLOW_ATTR_TCP_FLAGS
]);
4477 if (a
[OVS_FLOW_ATTR_USED
]) {
4478 flow
->used
= nl_attr_get(a
[OVS_FLOW_ATTR_USED
]);
4485 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
4486 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
4487 * OVS_KEY_ATTR_ETHERTYPE. Puts 'data' to 'buf'.
4490 put_exclude_packet_type(struct ofpbuf
*buf
, uint16_t type
,
4491 const struct nlattr
*data
, uint16_t data_len
)
4493 const struct nlattr
*packet_type
;
4495 packet_type
= nl_attr_find__(data
, data_len
, OVS_KEY_ATTR_PACKET_TYPE
);
4498 /* exclude PACKET_TYPE Netlink attribute. */
4499 ovs_assert(NLA_ALIGN(packet_type
->nla_len
) == NL_A_U32_SIZE
);
4500 size_t packet_type_len
= NL_A_U32_SIZE
;
4501 size_t first_chunk_size
= (uint8_t *)packet_type
- (uint8_t *)data
;
4502 size_t second_chunk_size
= data_len
- first_chunk_size
4504 struct nlattr
*next_attr
= nl_attr_next(packet_type
);
4507 ofs
= nl_msg_start_nested(buf
, type
);
4508 nl_msg_put(buf
, data
, first_chunk_size
);
4509 nl_msg_put(buf
, next_attr
, second_chunk_size
);
4510 if (!nl_attr_find__(data
, data_len
, OVS_KEY_ATTR_ETHERNET
)) {
4511 ovs_be16 pt
= pt_ns_type_be(nl_attr_get_be32(packet_type
));
4512 const struct nlattr
*nla
;
4514 nla
= nl_attr_find(buf
, ofs
+ NLA_HDRLEN
, OVS_KEY_ATTR_ETHERTYPE
);
4516 ovs_be16
*ethertype
;
4518 ethertype
= CONST_CAST(ovs_be16
*, nl_attr_get(nla
));
4521 nl_msg_put_be16(buf
, OVS_KEY_ATTR_ETHERTYPE
, pt
);
4524 nl_msg_end_nested(buf
, ofs
);
4526 nl_msg_put_unspec(buf
, type
, data
, data_len
);
4530 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
4531 * followed by Netlink attributes corresponding to 'flow'. */
4533 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow
*flow
,
4536 struct ovs_header
*ovs_header
;
4538 nl_msg_put_genlmsghdr(buf
, 0, ovs_flow_family
,
4539 NLM_F_REQUEST
| flow
->nlmsg_flags
,
4540 flow
->cmd
, OVS_FLOW_VERSION
);
4542 ovs_header
= ofpbuf_put_uninit(buf
, sizeof *ovs_header
);
4543 ovs_header
->dp_ifindex
= flow
->dp_ifindex
;
4545 if (flow
->ufid_present
) {
4546 nl_msg_put_u128(buf
, OVS_FLOW_ATTR_UFID
, flow
->ufid
);
4548 if (flow
->ufid_terse
) {
4549 nl_msg_put_u32(buf
, OVS_FLOW_ATTR_UFID_FLAGS
,
4550 OVS_UFID_F_OMIT_KEY
| OVS_UFID_F_OMIT_MASK
4551 | OVS_UFID_F_OMIT_ACTIONS
);
4553 if (!flow
->ufid_terse
|| !flow
->ufid_present
) {
4554 if (flow
->key_len
) {
4555 put_exclude_packet_type(buf
, OVS_FLOW_ATTR_KEY
, flow
->key
,
4558 if (flow
->mask_len
) {
4559 put_exclude_packet_type(buf
, OVS_FLOW_ATTR_MASK
, flow
->mask
,
4562 if (flow
->actions
|| flow
->actions_len
) {
4563 nl_msg_put_unspec(buf
, OVS_FLOW_ATTR_ACTIONS
,
4564 flow
->actions
, flow
->actions_len
);
4568 /* We never need to send these to the kernel. */
4569 ovs_assert(!flow
->stats
);
4570 ovs_assert(!flow
->tcp_flags
);
4571 ovs_assert(!flow
->used
);
4574 nl_msg_put_flag(buf
, OVS_FLOW_ATTR_CLEAR
);
4577 nl_msg_put_flag(buf
, OVS_FLOW_ATTR_PROBE
);
4581 /* Clears 'flow' to "empty" values. */
4583 dpif_netlink_flow_init(struct dpif_netlink_flow
*flow
)
4585 memset(flow
, 0, sizeof *flow
);
4588 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4589 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4590 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4591 * result of the command is expected to be a flow also, which is decoded and
4592 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
4593 * is no longer needed ('reply' will contain pointers into '*bufp'). */
4595 dpif_netlink_flow_transact(struct dpif_netlink_flow
*request
,
4596 struct dpif_netlink_flow
*reply
,
4597 struct ofpbuf
**bufp
)
4599 struct ofpbuf
*request_buf
;
4602 ovs_assert((reply
!= NULL
) == (bufp
!= NULL
));
4605 request
->nlmsg_flags
|= NLM_F_ECHO
;
4608 request_buf
= ofpbuf_new(1024);
4609 dpif_netlink_flow_to_ofpbuf(request
, request_buf
);
4610 error
= nl_transact(NETLINK_GENERIC
, request_buf
, bufp
);
4611 ofpbuf_delete(request_buf
);
4615 error
= dpif_netlink_flow_from_ofpbuf(reply
, *bufp
);
4618 dpif_netlink_flow_init(reply
);
4619 ofpbuf_delete(*bufp
);
4627 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow
*flow
,
4628 struct dpif_flow_stats
*stats
)
4631 stats
->n_packets
= get_32aligned_u64(&flow
->stats
->n_packets
);
4632 stats
->n_bytes
= get_32aligned_u64(&flow
->stats
->n_bytes
);
4634 stats
->n_packets
= 0;
4637 stats
->used
= flow
->used
? get_32aligned_u64(flow
->used
) : 0;
4638 stats
->tcp_flags
= flow
->tcp_flags
? *flow
->tcp_flags
: 0;
4641 /* Logs information about a packet that was recently lost in 'ch' (in
4644 report_loss(struct dpif_netlink
*dpif
, struct dpif_channel
*ch
, uint32_t ch_idx
,
4645 uint32_t handler_id
)
4647 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 5);
4650 if (VLOG_DROP_WARN(&rl
)) {
4655 if (ch
->last_poll
!= LLONG_MIN
) {
4656 ds_put_format(&s
, " (last polled %lld ms ago)",
4657 time_msec() - ch
->last_poll
);
4660 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
4661 dpif_name(&dpif
->dpif
), ch_idx
, handler_id
);