]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif-netlink.c
ovsdb-idl: Fix iteration over tracked rows with no actual data.
[mirror_ovs.git] / lib / dpif-netlink.c
1 /*
2 * Copyright (c) 2008-2018 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dpif-netlink.h"
20
21 #include <ctype.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <net/if.h>
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
28 #include <poll.h>
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <sys/epoll.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34
35 #include "bitmap.h"
36 #include "dpif-netlink-rtnl.h"
37 #include "dpif-provider.h"
38 #include "fat-rwlock.h"
39 #include "flow.h"
40 #include "netdev-linux.h"
41 #include "netdev-offload.h"
42 #include "netdev-provider.h"
43 #include "netdev-vport.h"
44 #include "netdev.h"
45 #include "netlink-conntrack.h"
46 #include "netlink-notifier.h"
47 #include "netlink-socket.h"
48 #include "netlink.h"
49 #include "netnsid.h"
50 #include "odp-util.h"
51 #include "openvswitch/dynamic-string.h"
52 #include "openvswitch/flow.h"
53 #include "openvswitch/hmap.h"
54 #include "openvswitch/match.h"
55 #include "openvswitch/ofpbuf.h"
56 #include "openvswitch/poll-loop.h"
57 #include "openvswitch/shash.h"
58 #include "openvswitch/thread.h"
59 #include "openvswitch/vlog.h"
60 #include "packets.h"
61 #include "random.h"
62 #include "sset.h"
63 #include "timeval.h"
64 #include "unaligned.h"
65 #include "util.h"
66
67 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
68 #ifdef _WIN32
69 #include "wmi.h"
70 enum { WINDOWS = 1 };
71 #else
72 enum { WINDOWS = 0 };
73 #endif
74 enum { MAX_PORTS = USHRT_MAX };
75
76 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
77 * missing if we have old headers. */
78 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
79
80 #define FLOW_DUMP_MAX_BATCH 50
81 #define OPERATE_MAX_OPS 50
82
83 #ifndef EPOLLEXCLUSIVE
84 #define EPOLLEXCLUSIVE (1u << 28)
85 #endif
86
/* In-memory form of an OVS_DP_* Generic Netlink message, used for both
 * requests and replies.  Pointer members, when nonnull, typically point into
 * the ofpbuf the message was parsed from and are only valid while that
 * buffer lives. */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
};
102
103 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
104 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
105 const struct ofpbuf *);
106 static void dpif_netlink_dp_dump_start(struct nl_dump *);
107 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
108 struct dpif_netlink_dp *reply,
109 struct ofpbuf **bufp);
110 static int dpif_netlink_dp_get(const struct dpif *,
111 struct dpif_netlink_dp *reply,
112 struct ofpbuf **bufp);
113 static int
114 dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features);
115
/* In-memory form of an OVS_FLOW_* Generic Netlink message, used for both
 * requests and replies. */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
147
148 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
149 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
150 const struct ofpbuf *);
151 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
152 struct ofpbuf *);
153 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
154 struct dpif_netlink_flow *reply,
155 struct ofpbuf **bufp);
156 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
157 struct dpif_flow_stats *);
158 static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow *,
159 const struct dpif_netlink_flow *);
160
/* One of the dpif channels between the kernel and userspace: a per-port
 * Netlink socket on which the kernel delivers upcalls. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket. */
    long long int last_poll;    /* Last time this channel was polled.
                                 * Initialized to LLONG_MIN when the channel
                                 * is created. */
};
166
167 #ifdef _WIN32
168 #define VPORT_SOCK_POOL_SIZE 1
169 /* On Windows, there is no native support for epoll. There are equivalent
170 * interfaces though, that are not used currently. For simpicity, a pool of
171 * netlink sockets is used. Each socket is represented by 'struct
172 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
173 * sharing the same socket. In the future, we can add a reference count and
174 * such fields. */
175 struct dpif_windows_vport_sock {
176 struct nl_sock *nl_sock; /* netlink socket. */
177 };
178 #endif
179
/* Per-thread state for one upcall handler. */
struct dpif_handler {
    /* Array sized 'dpif->uc_array_size'; filled in by epoll_wait(). */
    struct epoll_event *epoll_events;
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
193
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_netlink {
    struct dpif dpif;
    int dp_ifindex;             /* Kernel datapath ifindex. */
    uint32_t user_features;     /* OVS_DP_F_* bits enabled on the datapath. */

    /* Upcall messages. */
    struct fat_rwlock upcall_lock;
    struct dpif_handler *handlers;
    uint32_t n_handlers;           /* Num of upcall handlers. */
    struct dpif_channel *channels; /* Array of channels for each port. */
    int uc_array_size;             /* Size of 'handler->channels' and */
                                   /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier; /* vport multicast group subscriber. */
    bool refresh_channels;         /* When set, dpif_netlink_run() rebuilds
                                    * the upcall channels. */
};
212
213 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
214 uint32_t ch_idx, uint32_t handler_id);
215
216 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
217
218 /* Generic Netlink family numbers for OVS.
219 *
220 * Initialized by dpif_netlink_init(). */
221 static int ovs_datapath_family;
222 static int ovs_vport_family;
223 static int ovs_flow_family;
224 static int ovs_packet_family;
225 static int ovs_meter_family;
226 static int ovs_ct_limit_family;
227
228 /* Generic Netlink multicast groups for OVS.
229 *
230 * Initialized by dpif_netlink_init(). */
231 static unsigned int ovs_vport_mcgroup;
232
233 /* If true, tunnel devices are created using OVS compat/genetlink.
234 * If false, tunnel devices are created with rtnetlink and using light weight
235 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
236 * to using the compat interface. */
237 static bool ovs_tunnels_out_of_tree = true;
238
239 static int dpif_netlink_init(void);
240 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
241 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
242 odp_port_t port_no);
243 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
244 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
245 uint32_t n_handlers);
246 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
247 struct ofpbuf *);
248 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
249 const struct ofpbuf *);
250 static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
251 odp_port_t port_no, const char *port_name,
252 struct dpif_port *dpif_port);
253
/* Obtains a Netlink socket for an upcall channel and stores it in '*sockp'.
 * On Linux a fresh NETLINK_GENERIC socket is created.  On Windows a socket
 * is borrowed, round-robin, from the first handler's pre-allocated pool.
 * Returns 0 on success, a positive errno value otherwise. */
static int
create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **sockp)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
#ifndef _WIN32
    return nl_sock_create(NETLINK_GENERIC, sockp);
#else
    /* Pick netlink sockets to use in a round-robin fashion from each
     * handler's pool of sockets. */
    struct dpif_handler *handler = &dpif->handlers[0];
    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
    size_t index = handler->last_used_pool_idx;

    /* A pool of sockets is allocated when the handler is initialized. */
    if (sock_pool == NULL) {
        *sockp = NULL;
        return EINVAL;
    }

    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
    *sockp = sock_pool[index].nl_sock;
    ovs_assert(*sockp);
    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
    handler->last_used_pool_idx = index;
    return 0;
#endif
}
281
/* Counterpart of create_nl_sock().  On Linux the channel owns its socket,
 * so destroy it.  On Windows sockets belong to the shared pool and are
 * freed elsewhere (see vport_delete_sock_pool()), so do nothing. */
static void
close_nl_sock(struct nl_sock *sock)
{
#ifndef _WIN32
    nl_sock_destroy(sock);
#endif
}
289
/* Returns the 'struct dpif_netlink' that embeds 'dpif', asserting that
 * 'dpif' really belongs to the netlink dpif class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
296
297 static int
298 dpif_netlink_enumerate(struct sset *all_dps,
299 const struct dpif_class *dpif_class OVS_UNUSED)
300 {
301 struct nl_dump dump;
302 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
303 struct ofpbuf msg, buf;
304 int error;
305
306 error = dpif_netlink_init();
307 if (error) {
308 return error;
309 }
310
311 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
312 dpif_netlink_dp_dump_start(&dump);
313 while (nl_dump_next(&dump, &msg, &buf)) {
314 struct dpif_netlink_dp dp;
315
316 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
317 sset_add(all_dps, dp.name);
318 }
319 }
320 ofpbuf_uninit(&buf);
321 return nl_dump_done(&dump);
322 }
323
/* Opens (and, with 'create', creates) the kernel datapath named 'name' and
 * stores a dpif for it in '*dpifp'.  When opening an existing datapath, the
 * currently enabled user features are fetched first so that the
 * OVS_DP_CMD_SET below does not clear them.  Returns 0 on success, a
 * positive errno value otherwise. */
static int
dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
                  bool create, struct dpif **dpifp)
{
    struct dpif_netlink_dp dp_request, dp;
    struct ofpbuf *buf;
    uint32_t upcall_pid;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    /* Create or look up datapath. */
    dpif_netlink_dp_init(&dp_request);
    upcall_pid = 0;
    dp_request.upcall_pid = &upcall_pid;
    dp_request.name = name;

    if (create) {
        dp_request.cmd = OVS_DP_CMD_NEW;
    } else {
        dp_request.cmd = OVS_DP_CMD_GET;

        /* Fetch the features already enabled so that the SET below
         * preserves them. */
        error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        if (error) {
            return error;
        }
        dp_request.user_features = dp.user_features;
        ofpbuf_delete(buf);

        /* Use OVS_DP_CMD_SET to report user features */
        dp_request.cmd = OVS_DP_CMD_SET;
    }

    /* Features this dpif always requests from the kernel. */
    dp_request.user_features |= OVS_DP_F_UNALIGNED;
    dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
    error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
    if (error) {
        return error;
    }

    error = open_dpif(&dp, dpifp);
    /* Probe for OVS_DP_F_TC_RECIRC_SHARING; the result is ignored,
     * presumably because older kernels do not support it -- best effort. */
    dpif_netlink_set_features(*dpifp, OVS_DP_F_TC_RECIRC_SHARING);
    ofpbuf_delete(buf);

    return error;
}
373
374 static int
375 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
376 {
377 struct dpif_netlink *dpif;
378
379 dpif = xzalloc(sizeof *dpif);
380 dpif->port_notifier = NULL;
381 fat_rwlock_init(&dpif->upcall_lock);
382
383 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
384 dp->dp_ifindex, dp->dp_ifindex);
385
386 dpif->dp_ifindex = dp->dp_ifindex;
387 dpif->user_features = dp->user_features;
388 *dpifp = &dpif->dpif;
389
390 return 0;
391 }
392
393 #ifdef _WIN32
/* Frees the handler's Windows socket pool, unsubscribing and destroying
 * every socket that had been created.  Safe to call on a handler whose pool
 * was never allocated or was only partially initialized. */
static void
vport_delete_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (handler->vport_sock_pool) {
        uint32_t i;
        struct dpif_windows_vport_sock *sock_pool =
            handler->vport_sock_pool;

        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
            if (sock_pool[i].nl_sock) {
                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
                nl_sock_destroy(sock_pool[i].nl_sock);
                sock_pool[i].nl_sock = NULL;
            }
        }

        free(handler->vport_sock_pool);
        handler->vport_sock_pool = NULL;
    }
}
415
/* Allocates the handler's pool of VPORT_SOCK_POOL_SIZE Netlink sockets and
 * subscribes each one to packet delivery.  On failure, tears down whatever
 * was created via vport_delete_sock_pool() and returns a positive errno
 * value; returns 0 on success. */
static int
vport_create_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_windows_vport_sock *sock_pool;
    size_t i;
    int error = 0;

    sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }

        /* Enable the netlink socket to receive packets.  This is equivalent to
         * calling nl_sock_join_mcgroup() to receive events. */
        error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }
    }

    handler->vport_sock_pool = sock_pool;
    handler->last_used_pool_idx = 0;
    return 0;

error:
    vport_delete_sock_pool(handler);
    return error;
}
447 #endif /* _WIN32 */
448
449 /* Given the port number 'port_idx', extracts the pid of netlink socket
450 * associated to the port and assigns it to 'upcall_pid'. */
451 static bool
452 vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx,
453 uint32_t *upcall_pid)
454 {
455 /* Since the nl_sock can only be assigned in either all
456 * or none "dpif" channels, the following check
457 * would suffice. */
458 if (!dpif->channels[port_idx].sock) {
459 return false;
460 }
461 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
462
463 *upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock);
464
465 return true;
466 }
467
/* Registers upcall channel 'sock' for 'port_no' in 'dpif': adds the socket
 * to every handler's epoll set and records it in 'dpif->channels', growing
 * the per-port arrays when 'port_no' exceeds their current size.  Takes
 * ownership of 'sock' on success.  Returns 0 on success or a positive errno
 * value on failure, in which case 'sock' is NOT destroyed (the caller
 * cleans it up). */
static int
vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
                  struct nl_sock *sock)
{
    struct epoll_event event;
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;
    int error;

    /* With no handlers there is nobody to poll the channel; drop the
     * socket and report success. */
    if (dpif->handlers == NULL) {
        close_nl_sock(sock);
        return 0;
    }

    /* We assume that the datapath densely chooses port numbers, which can
     * therefore be used as an index into 'channels' and 'epoll_events' of
     * 'dpif'. */
    if (port_idx >= dpif->uc_array_size) {
        uint32_t new_size = port_idx + 1;

        if (new_size > MAX_PORTS) {
            VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
                         dpif_name(&dpif->dpif), port_no);
            return EFBIG;
        }

        dpif->channels = xrealloc(dpif->channels,
                                  new_size * sizeof *dpif->channels);

        /* Newly exposed slots start with no socket. */
        for (i = dpif->uc_array_size; i < new_size; i++) {
            dpif->channels[i].sock = NULL;
        }

        for (i = 0; i < dpif->n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->epoll_events = xrealloc(handler->epoll_events,
                new_size * sizeof *handler->epoll_events);

        }
        dpif->uc_array_size = new_size;
    }

    memset(&event, 0, sizeof event);
    /* EPOLLEXCLUSIVE: wake only one of the handlers waiting on this fd. */
    event.events = EPOLLIN | EPOLLEXCLUSIVE;
    event.data.u32 = port_idx;

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

#ifndef _WIN32
        if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(sock),
                      &event) < 0) {
            error = errno;
            goto error;
        }
#endif
    }
    dpif->channels[port_idx].sock = sock;
    dpif->channels[port_idx].last_poll = LLONG_MIN;

    return 0;

error:
#ifndef _WIN32
    /* Roll back the epoll registrations made before the failure. */
    while (i--) {
        epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(sock), NULL);
    }
#endif
    dpif->channels[port_idx].sock = NULL;

    return error;
}
542
/* Removes and destroys the upcall channel for 'port_no', if any: detaches
 * its socket from every handler's epoll set, discards the handlers' pending
 * epoll results, and destroys the socket.  No-op if the port has no
 * channel. */
static void
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
{
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;

    if (!dpif->handlers || port_idx >= dpif->uc_array_size
        || !dpif->channels[port_idx].sock) {
        return;
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
        epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(dpif->channels[port_idx].sock), NULL);
#endif
        /* Drop buffered epoll results; some may refer to the socket being
         * removed. */
        handler->event_offset = handler->n_events = 0;
    }
#ifndef _WIN32
    nl_sock_destroy(dpif->channels[port_idx].sock);
#endif
    dpif->channels[port_idx].sock = NULL;
}
567
/* Tears down all upcall state in 'dpif': asks the kernel to stop sending
 * upcalls for every port (by setting its upcall PID to 0), destroys each
 * port's channel, then frees the handlers and channel arrays. */
static void
destroy_all_channels(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned int i;

    if (!dpif->handlers) {
        return;
    }

    for (i = 0; i < dpif->uc_array_size; i++ ) {
        struct dpif_netlink_vport vport_request;
        uint32_t upcall_pids = 0;

        if (!dpif->channels[i].sock) {
            continue;
        }

        /* Turn off upcalls. */
        dpif_netlink_vport_init(&vport_request);
        vport_request.cmd = OVS_VPORT_CMD_SET;
        vport_request.dp_ifindex = dpif->dp_ifindex;
        vport_request.port_no = u32_to_odp(i);
        vport_request.n_upcall_pids = 1;
        vport_request.upcall_pids = &upcall_pids;
        dpif_netlink_vport_transact(&vport_request, NULL, NULL);

        vport_del_channels(dpif, u32_to_odp(i));
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        dpif_netlink_handler_uninit(handler);
        free(handler->epoll_events);
    }
    free(dpif->channels);
    free(dpif->handlers);
    dpif->handlers = NULL;
    dpif->channels = NULL;
    dpif->n_handlers = 0;
    dpif->uc_array_size = 0;
}
611
612 static void
613 dpif_netlink_close(struct dpif *dpif_)
614 {
615 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
616
617 nl_sock_destroy(dpif->port_notifier);
618
619 fat_rwlock_wrlock(&dpif->upcall_lock);
620 destroy_all_channels(dpif);
621 fat_rwlock_unlock(&dpif->upcall_lock);
622
623 fat_rwlock_destroy(&dpif->upcall_lock);
624 free(dpif);
625 }
626
627 static int
628 dpif_netlink_destroy(struct dpif *dpif_)
629 {
630 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
631 struct dpif_netlink_dp dp;
632
633 dpif_netlink_dp_init(&dp);
634 dp.cmd = OVS_DP_CMD_DEL;
635 dp.dp_ifindex = dpif->dp_ifindex;
636 return dpif_netlink_dp_transact(&dp, NULL, NULL);
637 }
638
639 static bool
640 dpif_netlink_run(struct dpif *dpif_)
641 {
642 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
643
644 if (dpif->refresh_channels) {
645 dpif->refresh_channels = false;
646 fat_rwlock_wrlock(&dpif->upcall_lock);
647 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
648 fat_rwlock_unlock(&dpif->upcall_lock);
649 }
650 return false;
651 }
652
/* Implements dpif 'get_stats': queries the datapath and converts the kernel
 * stats into '*stats'.  Fields the kernel did not report are zeroed, except
 * the mask stats, which are set to "unknown" sentinel values.  Returns 0 on
 * success, a positive errno value otherwise. */
static int
dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
{
    struct dpif_netlink_dp dp;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_dp_get(dpif_, &dp, &buf);
    if (!error) {
        memset(stats, 0, sizeof *stats);

        if (dp.stats) {
            /* Kernel counters are only 32-bit aligned; use the aligned
             * accessor. */
            stats->n_hit    = get_32aligned_u64(&dp.stats->n_hit);
            stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
            stats->n_lost   = get_32aligned_u64(&dp.stats->n_lost);
            stats->n_flows  = get_32aligned_u64(&dp.stats->n_flows);
        }

        if (dp.megaflow_stats) {
            stats->n_masks = dp.megaflow_stats->n_masks;
            stats->n_mask_hit = get_32aligned_u64(
                &dp.megaflow_stats->n_mask_hit);
        } else {
            /* Kernel does not report megaflow stats; mark as unknown. */
            stats->n_masks = UINT32_MAX;
            stats->n_mask_hit = UINT64_MAX;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
683
/* Requests that the kernel enable 'new_features' (OVS_DP_F_* bits) on the
 * datapath, in addition to the features already enabled, and refreshes
 * 'dpif->user_features' from the kernel's reply.  Returns 0 if all of
 * 'new_features' were enabled, a positive errno value if the transaction
 * failed, or -EOPNOTSUPP if the kernel did not enable all requested bits.
 *
 * NOTE(review): the -EOPNOTSUPP return is negative while the rest of this
 * file uses positive errno values -- confirm that callers expect this. */
static int
dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_dp request, reply;
    struct ofpbuf *bufp;
    int error;

    dpif_netlink_dp_init(&request);
    request.cmd = OVS_DP_CMD_SET;
    request.name = dpif_->base_name;
    request.dp_ifindex = dpif->dp_ifindex;
    request.user_features = dpif->user_features | new_features;

    error = dpif_netlink_dp_transact(&request, &reply, &bufp);
    if (!error) {
        dpif->user_features = reply.user_features;
        ofpbuf_delete(bufp);
        if (!(dpif->user_features & new_features)) {
            return -EOPNOTSUPP;
        }
    }

    return error;
}
709
/* Returns the netdev type string (e.g. "system", "internal", "vxlan")
 * corresponding to 'vport''s kernel vport type, or "unknown" (with a
 * rate-limited warning) for types this build does not recognize. */
static const char *
get_vport_type(const struct dpif_netlink_vport *vport)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

    switch (vport->type) {
    case OVS_VPORT_TYPE_NETDEV: {
        /* A NETDEV vport may be backed by a tap or other device; ask the
         * netdev layer, falling back to "system". */
        const char *type = netdev_get_type_from_name(vport->name);

        return type ? type : "system";
    }

    case OVS_VPORT_TYPE_INTERNAL:
        return "internal";

    case OVS_VPORT_TYPE_GENEVE:
        return "geneve";

    case OVS_VPORT_TYPE_GRE:
        return "gre";

    case OVS_VPORT_TYPE_VXLAN:
        return "vxlan";

    case OVS_VPORT_TYPE_LISP:
        return "lisp";

    case OVS_VPORT_TYPE_STT:
        return "stt";

    case OVS_VPORT_TYPE_ERSPAN:
        return "erspan";

    case OVS_VPORT_TYPE_IP6ERSPAN:
        return "ip6erspan";

    case OVS_VPORT_TYPE_IP6GRE:
        return "ip6gre";

    case OVS_VPORT_TYPE_GTPU:
        return "gtpu";

    case OVS_VPORT_TYPE_UNSPEC:
    case __OVS_VPORT_TYPE_MAX:
        break;
    }

    VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
                 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
    return "unknown";
}
761
762 enum ovs_vport_type
763 netdev_to_ovs_vport_type(const char *type)
764 {
765 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
766 return OVS_VPORT_TYPE_NETDEV;
767 } else if (!strcmp(type, "internal")) {
768 return OVS_VPORT_TYPE_INTERNAL;
769 } else if (strstr(type, "stt")) {
770 return OVS_VPORT_TYPE_STT;
771 } else if (!strcmp(type, "geneve")) {
772 return OVS_VPORT_TYPE_GENEVE;
773 } else if (!strcmp(type, "vxlan")) {
774 return OVS_VPORT_TYPE_VXLAN;
775 } else if (!strcmp(type, "lisp")) {
776 return OVS_VPORT_TYPE_LISP;
777 } else if (!strcmp(type, "erspan")) {
778 return OVS_VPORT_TYPE_ERSPAN;
779 } else if (!strcmp(type, "ip6erspan")) {
780 return OVS_VPORT_TYPE_IP6ERSPAN;
781 } else if (!strcmp(type, "ip6gre")) {
782 return OVS_VPORT_TYPE_IP6GRE;
783 } else if (!strcmp(type, "gre")) {
784 return OVS_VPORT_TYPE_GRE;
785 } else if (!strcmp(type, "gtpu")) {
786 return OVS_VPORT_TYPE_GTPU;
787 } else {
788 return OVS_VPORT_TYPE_UNSPEC;
789 }
790 }
791
/* Adds a port named 'name' of vport 'type' to 'dpif', passing the optional
 * Netlink-formatted tunnel 'options' to the kernel.  If upcall handlers are
 * active, creates a channel socket for the port and registers its PID as
 * the upcall destination.  '*port_nop' may request a specific port number
 * (or ODPP_NONE); on success it is set to the assigned number.  Returns 0
 * on success, a positive errno value otherwise. */
static int
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                        enum ovs_vport_type type,
                        struct ofpbuf *options,
                        odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport request, reply;
    struct ofpbuf *buf;
    struct nl_sock *sock = NULL;
    uint32_t upcall_pids = 0;
    int error = 0;

    if (dpif->handlers) {
        error = create_nl_sock(dpif, &sock);
        if (error) {
            return error;
        }
    }

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_NEW;
    request.dp_ifindex = dpif->dp_ifindex;
    request.type = type;
    request.name = name;

    request.port_no = *port_nop;
    /* With no handlers 'upcall_pids' stays 0, which disables upcalls. */
    if (sock) {
        upcall_pids = nl_sock_pid(sock);
    }
    request.n_upcall_pids = 1;
    request.upcall_pids = &upcall_pids;

    if (options) {
        request.options = options->data;
        request.options_len = options->size;
    }

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        *port_nop = reply.port_no;
    } else {
        if (error == EBUSY && *port_nop != ODPP_NONE) {
            VLOG_INFO("%s: requested port %"PRIu32" is in use",
                      dpif_name(&dpif->dpif), *port_nop);
        }

        close_nl_sock(sock);
        goto exit;
    }

    error = vport_add_channel(dpif, *port_nop, sock);
    if (error) {
        VLOG_INFO("%s: could not add channel for port %s",
                  dpif_name(&dpif->dpif), name);

        /* Delete the port. */
        dpif_netlink_vport_init(&request);
        request.cmd = OVS_VPORT_CMD_DEL;
        request.dp_ifindex = dpif->dp_ifindex;
        request.port_no = *port_nop;
        dpif_netlink_vport_transact(&request, NULL, NULL);
        close_nl_sock(sock);
        goto exit;
    }

exit:
    ofpbuf_delete(buf);

    return error;
}
863
/* Adds 'netdev' to 'dpif' using the classic (compat) vport interface:
 * translates the netdev type to an OVS vport type, builds tunnel options
 * for tunnel ports, and delegates to dpif_netlink_port_add__().  Returns 0
 * on success, a positive errno value otherwise. */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX : Map appropiate Windows handle */
#else
        /* Turn off LRO on system devices added to the datapath. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        /* Serialize destination port and extension flags as Netlink tunnel
         * attributes on the stack buffer. */
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (1 << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
930
/* Creates a tunnel device for 'netdev' via rtnetlink and then adds it to
 * 'dpif' as a NETDEV vport.  If the vport addition fails, the rtnetlink
 * device is destroyed again.  Returns 0 on success, a positive errno value
 * (including EOPNOTSUPP when rtnetlink creation is unsupported) on
 * failure. */
static int
dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
                                      struct netdev *netdev,
                                      odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *name;
    int error;

    error = dpif_netlink_rtnl_port_create(netdev);
    if (error) {
        /* EOPNOTSUPP is the expected "not supported" outcome; only warn
         * about other failures. */
        if (error != EOPNOTSUPP) {
            VLOG_WARN_RL(&rl, "Failed to create %s with rtnetlink: %s",
                         netdev_get_name(netdev), ovs_strerror(error));
        }
        return error;
    }

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
                                    port_nop);
    if (error) {
        dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
    }
    return error;
}
959
960 static int
961 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
962 odp_port_t *port_nop)
963 {
964 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
965 int error = EOPNOTSUPP;
966
967 fat_rwlock_wrlock(&dpif->upcall_lock);
968 if (!ovs_tunnels_out_of_tree) {
969 error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop);
970 }
971 if (error) {
972 error = dpif_netlink_port_add_compat(dpif, netdev, port_nop);
973 }
974 fat_rwlock_unlock(&dpif->upcall_lock);
975
976 return error;
977 }
978
/* Removes port 'port_no' from 'dpif': deletes the kernel vport, tears down
 * its upcall channel, and, for rtnetlink-created tunnels, destroys the
 * underlying device as well.  Returns 0 on success, a positive errno value
 * otherwise. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    /* Look the port up first; its name and type are needed below. */
    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* EOPNOTSUPP just means the port was not rtnetlink-managed. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
1019
1020 static int
1021 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
1022 {
1023 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1024 int error;
1025
1026 fat_rwlock_wrlock(&dpif->upcall_lock);
1027 error = dpif_netlink_port_del__(dpif, port_no);
1028 fat_rwlock_unlock(&dpif->upcall_lock);
1029
1030 return error;
1031 }
1032
/* Queries the kernel for the port identified by 'port_no' or, when
 * 'port_name' is nonnull, by name.  On success fills '*dpif_port' (if
 * nonnull) with malloc'd name/type strings that the caller must release
 * with dpif_port_destroy().  Returns 0 on success, ENODEV if a by-name
 * lookup matched a different datapath, or another positive errno value. */
static int
dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
                          const char *port_name, struct dpif_port *dpif_port)
{
    struct dpif_netlink_vport request;
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;
    request.port_no = port_no;
    request.name = port_name;

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        if (reply.dp_ifindex != request.dp_ifindex) {
            /* A query by name reported that 'port_name' is in some datapath
             * other than 'dpif', but the caller wants to know about 'dpif'. */
            error = ENODEV;
        } else if (dpif_port) {
            dpif_port->name = xstrdup(reply.name);
            dpif_port->type = xstrdup(get_vport_type(&reply));
            dpif_port->port_no = reply.port_no;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
1063
1064 static int
1065 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
1066 struct dpif_port *dpif_port)
1067 {
1068 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1069
1070 return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
1071 }
1072
/* Implements dpif 'port_query_by_name' by delegating to the common query
 * helper with 'devname' as the lookup key (port number unused). */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    return dpif_netlink_port_query__(dpif_netlink_cast(dpif_), 0, devname,
                                     dpif_port);
}
1081
/* Returns the Netlink PID that upcalls for 'port_no' should be sent to, or
 * 0 if no channels are configured.  Ports beyond the channel array (and the
 * ODPP_NONE reserved port) map to channel 0. */
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
                            odp_port_t port_no)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    uint32_t port_idx = odp_to_u32(port_no);
    uint32_t pid = 0;

    if (dpif->handlers && dpif->uc_array_size > 0) {
        /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
         * channel, since it is not heavily loaded. */
        uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;

        /* Needs to check in case the socket pointer is changed in between
         * the holding of upcall_lock. A known case happens when the main
         * thread deletes the vport while the handler thread is handling
         * the upcall from that port. */
        if (dpif->channels[idx].sock) {
            pid = nl_sock_pid(dpif->channels[idx].sock);
        }
    }

    return pid;
}
1106
1107 static uint32_t
1108 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
1109 {
1110 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1111 uint32_t ret;
1112
1113 fat_rwlock_rdlock(&dpif->upcall_lock);
1114 ret = dpif_netlink_port_get_pid__(dpif, port_no);
1115 fat_rwlock_unlock(&dpif->upcall_lock);
1116
1117 return ret;
1118 }
1119
1120 static int
1121 dpif_netlink_flow_flush(struct dpif *dpif_)
1122 {
1123 const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif_));
1124 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1125 struct dpif_netlink_flow flow;
1126
1127 dpif_netlink_flow_init(&flow);
1128 flow.cmd = OVS_FLOW_CMD_DEL;
1129 flow.dp_ifindex = dpif->dp_ifindex;
1130
1131 if (netdev_is_flow_api_enabled()) {
1132 netdev_ports_flow_flush(dpif_type_str);
1133 }
1134
1135 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1136 }
1137
/* State carried across dpif_netlink_port_dump_start() and the subsequent
 * port_dump_next()/port_dump_done() calls. */
struct dpif_netlink_port_state {
    struct nl_dump dump;        /* In-progress OVS_VPORT_CMD_GET dump. */
    struct ofpbuf buf;          /* Reusable buffer holding the current
                                 * vport record; dump results point into
                                 * this buffer. */
};
1142
1143 static void
1144 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1145 struct nl_dump *dump)
1146 {
1147 struct dpif_netlink_vport request;
1148 struct ofpbuf *buf;
1149
1150 dpif_netlink_vport_init(&request);
1151 request.cmd = OVS_VPORT_CMD_GET;
1152 request.dp_ifindex = dpif->dp_ifindex;
1153
1154 buf = ofpbuf_new(1024);
1155 dpif_netlink_vport_to_ofpbuf(&request, buf);
1156 nl_dump_start(dump, NETLINK_GENERIC, buf);
1157 ofpbuf_delete(buf);
1158 }
1159
1160 static int
1161 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1162 {
1163 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1164 struct dpif_netlink_port_state *state;
1165
1166 *statep = state = xmalloc(sizeof *state);
1167 dpif_netlink_port_dump_start__(dpif, &state->dump);
1168
1169 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
1170 return 0;
1171 }
1172
1173 static int
1174 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1175 struct nl_dump *dump,
1176 struct dpif_netlink_vport *vport,
1177 struct ofpbuf *buffer)
1178 {
1179 struct ofpbuf buf;
1180 int error;
1181
1182 if (!nl_dump_next(dump, &buf, buffer)) {
1183 return EOF;
1184 }
1185
1186 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1187 if (error) {
1188 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1189 dpif_name(&dpif->dpif), ovs_strerror(error));
1190 }
1191 return error;
1192 }
1193
1194 static int
1195 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1196 struct dpif_port *dpif_port)
1197 {
1198 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1199 struct dpif_netlink_port_state *state = state_;
1200 struct dpif_netlink_vport vport;
1201 int error;
1202
1203 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1204 &state->buf);
1205 if (error) {
1206 return error;
1207 }
1208 dpif_port->name = CONST_CAST(char *, vport.name);
1209 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1210 dpif_port->port_no = vport.port_no;
1211 return 0;
1212 }
1213
1214 static int
1215 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1216 {
1217 struct dpif_netlink_port_state *state = state_;
1218 int error = nl_dump_done(&state->dump);
1219
1220 ofpbuf_uninit(&state->buf);
1221 free(state);
1222 return error;
1223 }
1224
/* dpif 'port_poll' implementation: reports the name of a port that has
 * been added, deleted, or modified since the last call.  Returns 0 and
 * stores a malloc'd name in '*devnamep' when a change is found, EAGAIN
 * when no change is pending, ENOBUFS when the caller should assume that
 * everything changed, or another positive errno value on error. */
static int
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    /* Drain notifications until one is relevant to this datapath (return
     * it), the socket would block (return EAGAIN), or receiving fails
     * (drain and report ENOBUFS so the caller refreshes everything). */
    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, NULL, false);
        if (!error) {
            struct dpif_netlink_vport vport;

            /* Notifications for other datapaths, unknown commands, or
             * unparseable messages are silently skipped. */
            error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    /* A deleted vport leaves a stale upcall channel behind;
                     * flag the handler channels for refresh. */
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
1292
1293 static void
1294 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1295 {
1296 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1297
1298 if (dpif->port_notifier) {
1299 nl_sock_wait(dpif->port_notifier, POLLIN);
1300 } else {
1301 poll_immediate_wake();
1302 }
1303 }
1304
1305 static void
1306 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1307 const ovs_u128 *ufid, bool terse)
1308 {
1309 if (ufid) {
1310 request->ufid = *ufid;
1311 request->ufid_present = true;
1312 } else {
1313 request->ufid_present = false;
1314 }
1315 request->ufid_terse = terse;
1316 }
1317
1318 static void
1319 dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
1320 const struct nlattr *key, size_t key_len,
1321 const ovs_u128 *ufid, bool terse,
1322 struct dpif_netlink_flow *request)
1323 {
1324 dpif_netlink_flow_init(request);
1325 request->cmd = OVS_FLOW_CMD_GET;
1326 request->dp_ifindex = dpif->dp_ifindex;
1327 request->key = key;
1328 request->key_len = key_len;
1329 dpif_netlink_flow_init_ufid(request, ufid, terse);
1330 }
1331
1332 static void
1333 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1334 const struct dpif_flow_get *get,
1335 struct dpif_netlink_flow *request)
1336 {
1337 dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
1338 false, request);
1339 }
1340
1341 static int
1342 dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
1343 const struct nlattr *key, size_t key_len,
1344 const ovs_u128 *ufid, bool terse,
1345 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1346 {
1347 struct dpif_netlink_flow request;
1348
1349 dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
1350 return dpif_netlink_flow_transact(&request, reply, bufp);
1351 }
1352
1353 static int
1354 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1355 const struct dpif_netlink_flow *flow,
1356 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1357 {
1358 return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
1359 flow->ufid_present ? &flow->ufid : NULL,
1360 false, reply, bufp);
1361 }
1362
1363 static void
1364 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1365 const struct dpif_flow_put *put,
1366 struct dpif_netlink_flow *request)
1367 {
1368 static const struct nlattr dummy_action;
1369
1370 dpif_netlink_flow_init(request);
1371 request->cmd = (put->flags & DPIF_FP_CREATE
1372 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1373 request->dp_ifindex = dpif->dp_ifindex;
1374 request->key = put->key;
1375 request->key_len = put->key_len;
1376 request->mask = put->mask;
1377 request->mask_len = put->mask_len;
1378 dpif_netlink_flow_init_ufid(request, put->ufid, false);
1379
1380 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1381 request->actions = (put->actions
1382 ? put->actions
1383 : CONST_CAST(struct nlattr *, &dummy_action));
1384 request->actions_len = put->actions_len;
1385 if (put->flags & DPIF_FP_ZERO_STATS) {
1386 request->clear = true;
1387 }
1388 if (put->flags & DPIF_FP_PROBE) {
1389 request->probe = true;
1390 }
1391 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
1392 }
1393
1394 static void
1395 dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
1396 const struct nlattr *key, size_t key_len,
1397 const ovs_u128 *ufid, bool terse,
1398 struct dpif_netlink_flow *request)
1399 {
1400 dpif_netlink_flow_init(request);
1401 request->cmd = OVS_FLOW_CMD_DEL;
1402 request->dp_ifindex = dpif->dp_ifindex;
1403 request->key = key;
1404 request->key_len = key_len;
1405 dpif_netlink_flow_init_ufid(request, ufid, terse);
1406 }
1407
1408 static void
1409 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1410 const struct dpif_flow_del *del,
1411 struct dpif_netlink_flow *request)
1412 {
1413 dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
1414 del->ufid, del->terse, request);
1415 }
1416
/* Flow-dump state shared by every thread participating in one dump.
 * Per-thread state lives in struct dpif_netlink_flow_dump_thread. */
struct dpif_netlink_flow_dump {
    struct dpif_flow_dump up;       /* Common dpif flow-dump state. */
    struct nl_dump nl_dump;         /* Kernel flow dump (ovs_flows only). */
    atomic_int status;              /* Nonzero errno if any thread hit an
                                     * error while dumping. */
    struct netdev_flow_dump **netdev_dumps;
    int netdev_dumps_num;                    /* Number of netdev_flow_dumps */
    struct ovs_mutex netdev_lock;            /* Guards the following. */
    int netdev_current_dump OVS_GUARDED;     /* Shared current dump */
    struct dpif_flow_dump_types types;       /* Type of dump */
};
1427
/* Downcasts 'dump', which must be embedded in a dpif_netlink_flow_dump,
 * back to its containing structure. */
static struct dpif_netlink_flow_dump *
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
}
1433
1434 static void
1435 start_netdev_dump(const struct dpif *dpif_,
1436 struct dpif_netlink_flow_dump *dump)
1437 {
1438 ovs_mutex_init(&dump->netdev_lock);
1439
1440 if (!(dump->types.netdev_flows)) {
1441 dump->netdev_dumps_num = 0;
1442 dump->netdev_dumps = NULL;
1443 return;
1444 }
1445
1446 ovs_mutex_lock(&dump->netdev_lock);
1447 dump->netdev_current_dump = 0;
1448 dump->netdev_dumps
1449 = netdev_ports_flow_dump_create(dpif_normalize_type(dpif_type(dpif_)),
1450 &dump->netdev_dumps_num,
1451 dump->up.terse);
1452 ovs_mutex_unlock(&dump->netdev_lock);
1453 }
1454
1455 static void
1456 dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump,
1457 struct dpif_flow_dump_types *types)
1458 {
1459 if (!types) {
1460 dump->types.ovs_flows = true;
1461 dump->types.netdev_flows = true;
1462 } else {
1463 memcpy(&dump->types, types, sizeof *types);
1464 }
1465 }
1466
/* dpif 'flow_dump_create' implementation: allocates a new flow dump over
 * the requested 'types' (kernel flows, offloaded netdev flows, or both).
 * When 'terse' is true, only stats and UFIDs are requested. */
static struct dpif_flow_dump *
dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
                              struct dpif_flow_dump_types *types)
{
    const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_flow_dump *dump;
    struct dpif_netlink_flow request;
    struct ofpbuf *buf;

    dump = xmalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);

    dpif_netlink_populate_flow_dump_types(dump, types);

    if (dump->types.ovs_flows) {
        /* Kick off a kernel-side OVS_FLOW_CMD_GET dump of all flows. */
        dpif_netlink_flow_init(&request);
        request.cmd = OVS_FLOW_CMD_GET;
        request.dp_ifindex = dpif->dp_ifindex;
        request.ufid_present = false;
        request.ufid_terse = terse;

        buf = ofpbuf_new(1024);
        dpif_netlink_flow_to_ofpbuf(&request, buf);
        nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
        ofpbuf_delete(buf);
    }
    atomic_init(&dump->status, 0);
    /* 'up.terse' must be set before start_netdev_dump(), which reads it. */
    dump->up.terse = terse;

    start_netdev_dump(dpif_, dump);

    return &dump->up;
}
1500
/* dpif 'flow_dump_destroy' implementation: finishes the kernel dump (if
 * one was started) and all netdev dumps, then frees 'dump_'.  Returns the
 * first error recorded by any dumping thread, else the kernel dump's
 * completion status. */
static int
dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
    unsigned int nl_status = 0;
    int dump_status;

    if (dump->types.ovs_flows) {
        nl_status = nl_dump_done(&dump->nl_dump);
    }

    for (int i = 0; i < dump->netdev_dumps_num; i++) {
        int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);

        /* EOPNOTSUPP merely means this netdev has no offload support;
         * that is not worth logging. */
        if (err != 0 && err != EOPNOTSUPP) {
            VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
        }
    }

    free(dump->netdev_dumps);
    ovs_mutex_destroy(&dump->netdev_lock);

    /* No other thread has access to 'dump' at this point. */
    atomic_read_relaxed(&dump->status, &dump_status);
    free(dump);
    return dump_status ? dump_status : nl_status;
}
1528
/* Per-thread state for one participant in a shared flow dump. */
struct dpif_netlink_flow_dump_thread {
    struct dpif_flow_dump_thread up;    /* Common per-thread dump state. */
    struct dpif_netlink_flow_dump *dump; /* The shared dump this thread
                                          * participates in. */
    struct dpif_netlink_flow flow;
    struct dpif_flow_stats stats;
    struct ofpbuf nl_flows;     /* Always used to store flows. */
    struct ofpbuf *nl_actions;  /* Used if kernel does not supply actions. */
    int netdev_dump_idx;        /* This thread current netdev dump index */
    bool netdev_done;           /* If we are finished dumping netdevs */

    /* (Key/Mask/Actions) Buffers for netdev dumping */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
};
1544
/* Downcasts 'thread', which must be embedded in a
 * dpif_netlink_flow_dump_thread, back to its containing structure. */
static struct dpif_netlink_flow_dump_thread *
dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
}
1550
1551 static struct dpif_flow_dump_thread *
1552 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1553 {
1554 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1555 struct dpif_netlink_flow_dump_thread *thread;
1556
1557 thread = xmalloc(sizeof *thread);
1558 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1559 thread->dump = dump;
1560 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1561 thread->nl_actions = NULL;
1562 thread->netdev_dump_idx = 0;
1563 thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num);
1564
1565 return &thread->up;
1566 }
1567
1568 static void
1569 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1570 {
1571 struct dpif_netlink_flow_dump_thread *thread
1572 = dpif_netlink_flow_dump_thread_cast(thread_);
1573
1574 ofpbuf_uninit(&thread->nl_flows);
1575 ofpbuf_delete(thread->nl_actions);
1576 free(thread);
1577 }
1578
1579 static void
1580 dpif_netlink_flow_to_dpif_flow(struct dpif_flow *dpif_flow,
1581 const struct dpif_netlink_flow *datapath_flow)
1582 {
1583 dpif_flow->key = datapath_flow->key;
1584 dpif_flow->key_len = datapath_flow->key_len;
1585 dpif_flow->mask = datapath_flow->mask;
1586 dpif_flow->mask_len = datapath_flow->mask_len;
1587 dpif_flow->actions = datapath_flow->actions;
1588 dpif_flow->actions_len = datapath_flow->actions_len;
1589 dpif_flow->ufid_present = datapath_flow->ufid_present;
1590 dpif_flow->pmd_id = PMD_ID_NULL;
1591 if (datapath_flow->ufid_present) {
1592 dpif_flow->ufid = datapath_flow->ufid;
1593 } else {
1594 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1595 odp_flow_key_hash(datapath_flow->key, datapath_flow->key_len,
1596 &dpif_flow->ufid);
1597 }
1598 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
1599 dpif_flow->attrs.offloaded = false;
1600 dpif_flow->attrs.dp_layer = "ovs";
1601 dpif_flow->attrs.dp_extra_info = NULL;
1602 }
1603
/* The design is such that all threads are working together on the first dump
 * to the last, in order (at first they all on dump 0).
 * When the first thread finds that the given dump is finished,
 * they all move to the next. If two or more threads find the same dump
 * is finished at the same time, the first one will advance the shared
 * netdev_current_dump and the others will catch up. */
static void
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
{
    struct dpif_netlink_flow_dump *dump = thread->dump;

    ovs_mutex_lock(&dump->netdev_lock);
    /* if we haven't finished (dumped everything) */
    if (dump->netdev_current_dump < dump->netdev_dumps_num) {
        /* if we are the first to find that current dump is finished
         * advance it. */
        if (thread->netdev_dump_idx == dump->netdev_current_dump) {
            thread->netdev_dump_idx = ++dump->netdev_current_dump;
            /* did we just finish the last dump? done. */
            if (dump->netdev_current_dump == dump->netdev_dumps_num) {
                thread->netdev_done = true;
            }
        } else {
            /* otherwise, we are behind, catch up */
            thread->netdev_dump_idx = dump->netdev_current_dump;
        }
    } else {
        /* some other thread finished */
        thread->netdev_done = true;
    }
    ovs_mutex_unlock(&dump->netdev_lock);
}
1636
/* Converts a netdev-offload flow ('match', 'actions', 'stats', 'attrs',
 * 'ufid') into '*flow'.  The serialized key and mask are appended to
 * 'key_buf' and 'mask_buf' respectively, and 'flow' points into those
 * buffers, so they must outlive 'flow'.  When 'terse' is true, only stats,
 * UFID, and attributes are filled in.  Always returns 0. */
static int
dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
                                       struct ofpbuf *key_buf,
                                       struct ofpbuf *mask_buf,
                                       struct nlattr *actions,
                                       struct dpif_flow_stats *stats,
                                       struct dpif_flow_attrs *attrs,
                                       ovs_u128 *ufid,
                                       struct dpif_flow *flow,
                                       bool terse)
{
    memset(flow, 0, sizeof *flow);

    if (!terse) {
        struct odp_flow_key_parms odp_parms = {
            .flow = &match->flow,
            .mask = &match->wc.masks,
            .support = {
                .max_vlan_headers = 2,
                .recirc = true,
                .ct_state = true,
                .ct_zone = true,
                .ct_mark = true,
                .ct_label = true,
            },
        };
        size_t offset;

        /* Key: serialize into 'key_buf', remembering where our data
         * starts so 'flow->key_len' covers only this flow's bytes. */
        offset = key_buf->size;
        flow->key = ofpbuf_tail(key_buf);
        odp_flow_key_from_flow(&odp_parms, key_buf);
        flow->key_len = key_buf->size - offset;

        /* Mask: same bookkeeping against 'mask_buf'. */
        offset = mask_buf->size;
        flow->mask = ofpbuf_tail(mask_buf);
        odp_parms.key_buf = key_buf;
        odp_flow_key_from_mask(&odp_parms, mask_buf);
        flow->mask_len = mask_buf->size - offset;

        /* Actions: alias the caller-provided attribute payload. */
        flow->actions = nl_attr_get(actions);
        flow->actions_len = nl_attr_get_size(actions);
    }

    /* Stats */
    memcpy(&flow->stats, stats, sizeof *stats);

    /* UFID */
    flow->ufid_present = true;
    flow->ufid = *ufid;

    flow->pmd_id = PMD_ID_NULL;

    memcpy(&flow->attrs, attrs, sizeof *attrs);

    return 0;
}
1696
/* dpif 'flow_dump_next' implementation: stores up to 'max_flows' flows in
 * 'flows[]' and returns the number stored.  Netdev (offloaded) flows are
 * dumped first; once this thread is done with those, kernel flows follow.
 * Returned flow data points into thread-owned buffers valid until the
 * next call on the same thread. */
static int
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);
    struct dpif_netlink_flow_dump *dump = thread->dump;
    struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
    int n_flows;

    /* Release the actions buffer from the previous batch, if any. */
    ofpbuf_delete(thread->nl_actions);
    thread->nl_actions = NULL;

    n_flows = 0;
    max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

    /* Phase 1: netdev-offloaded flows, one batch slot per flow.  A dump
     * that runs dry advances this thread to the next shared dump (see
     * dpif_netlink_advance_netdev_dump()). */
    while (!thread->netdev_done && n_flows < max_flows) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
        struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
        struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
        struct ofpbuf key, mask, act;
        struct dpif_flow *f = &flows[n_flows];
        int cur = thread->netdev_dump_idx;
        struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
        struct match match;
        struct nlattr *actions;
        struct dpif_flow_stats stats;
        struct dpif_flow_attrs attrs;
        ovs_u128 ufid;
        bool has_next;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        has_next = netdev_flow_dump_next(netdev_dump, &match,
                                         &actions, &stats, &attrs,
                                         &ufid,
                                         &thread->nl_flows,
                                         &act);
        if (has_next) {
            dpif_netlink_netdev_match_to_dpif_flow(&match,
                                                   &key, &mask,
                                                   actions,
                                                   &stats,
                                                   &attrs,
                                                   &ufid,
                                                   f,
                                                   dump->up.terse);
            n_flows++;
        } else {
            dpif_netlink_advance_netdev_dump(thread);
        }
    }

    if (!(dump->types.ovs_flows)) {
        return n_flows;
    }

    /* Phase 2: kernel flows.  Keep going until we have at least one flow,
     * or the batch is full, or the buffered Netlink data runs out. */
    while (!n_flows
           || (n_flows < max_flows && thread->nl_flows.size)) {
        struct dpif_netlink_flow datapath_flow;
        struct ofpbuf nl_flow;
        int error;

        /* Try to grab another flow. */
        if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
            break;
        }

        /* Convert the flow to our output format. */
        error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
        if (error) {
            atomic_store_relaxed(&dump->status, error);
            break;
        }

        if (dump->up.terse || datapath_flow.actions) {
            /* Common case: we don't want actions, or the flow includes
             * actions. */
            dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
        } else {
            /* Rare case: the flow does not include actions. Retrieve this
             * individual flow again to get the actions. */
            error = dpif_netlink_flow_get(dpif, &datapath_flow,
                                          &datapath_flow, &thread->nl_actions);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
                continue;
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
                atomic_store_relaxed(&dump->status, error);
                break;
            }

            /* Save this flow. Then exit, because we only have one buffer to
             * handle this case. */
            dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
            break;
        }
    }
    return n_flows;
}
1800
/* Encodes 'd_exec' as an OVS_PACKET_CMD_EXECUTE Netlink request for
 * datapath 'dp_ifindex', appending the message to 'buf'. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve room up front: headers (~64 bytes), the packet itself, the
     * metadata key, and the action list. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    /* Packet payload. */
    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Flow key derived from the packet's metadata. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    /* Actions to execute on the packet. */
    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }

    if (d_exec->hash) {
        nl_msg_put_u64(buf, OVS_PACKET_ATTR_HASH, d_exec->hash);
    }
}
1840
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive). */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    /* Per-operation Netlink transaction plus its request/reply buffers. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Pass 1: encode each operation into its own Netlink request.  A
     * reply buffer is attached only when the caller wants data back
     * (stats or a fetched flow). */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO asks the kernel to echo the flow back so the
                 * reply carries the flow's statistics. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->execute,
                                            &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Submit all the transactions in one batch. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: decode each reply, propagating per-operation errors through
     * 'op->error' and filling in requested stats/flows. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    dpif_netlink_flow_to_dpif_flow(get->flow, &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
2002
/* Attempts to satisfy a flow GET from the netdev offload layer.  On
 * success fills in 'get->flow', with the actions copied into
 * 'get->buffer'.  Returns 0 on success or a positive errno value (e.g.
 * if the flow is not offloaded). */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    const char *dpif_type_str = dpif_normalize_type(dpif_type(&dpif->dpif));
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct dpif_flow_attrs attrs;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif_type_str, &match, &actions, get->ufid,
                                &stats, &attrs, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats, &attrs,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* The stack-backed action data would not survive this function's
     * return, so copy it into the caller-provided 'get->buffer'. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
2043
2044 static int
2045 parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
2046 {
2047 const char *dpif_type_str = dpif_normalize_type(dpif_type(&dpif->dpif));
2048 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
2049 struct match match;
2050 odp_port_t in_port;
2051 const struct nlattr *nla;
2052 size_t left;
2053 struct netdev *dev;
2054 struct offload_info info;
2055 ovs_be16 dst_port = 0;
2056 uint8_t csum_on = false;
2057 int err;
2058
2059 if (put->flags & DPIF_FP_PROBE) {
2060 return EOPNOTSUPP;
2061 }
2062
2063 err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
2064 put->mask_len, &match);
2065 if (err) {
2066 return err;
2067 }
2068
2069 in_port = match.flow.in_port.odp_port;
2070 dev = netdev_ports_get(in_port, dpif_type_str);
2071 if (!dev) {
2072 return EOPNOTSUPP;
2073 }
2074
2075 /* Get tunnel dst port */
2076 NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
2077 if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
2078 const struct netdev_tunnel_config *tnl_cfg;
2079 struct netdev *outdev;
2080 odp_port_t out_port;
2081
2082 out_port = nl_attr_get_odp_port(nla);
2083 outdev = netdev_ports_get(out_port, dpif_type_str);
2084 if (!outdev) {
2085 err = EOPNOTSUPP;
2086 goto out;
2087 }
2088 tnl_cfg = netdev_get_tunnel_config(outdev);
2089 if (tnl_cfg && tnl_cfg->dst_port != 0) {
2090 dst_port = tnl_cfg->dst_port;
2091 }
2092 if (tnl_cfg) {
2093 csum_on = tnl_cfg->csum;
2094 }
2095 netdev_close(outdev);
2096 }
2097 }
2098
2099 info.tp_dst_port = dst_port;
2100 info.tunnel_csum_on = csum_on;
2101 info.recirc_id_shared_with_tc = (dpif->user_features
2102 & OVS_DP_F_TC_RECIRC_SHARING);
2103 info.tc_modify_flow_deleted = false;
2104 err = netdev_flow_put(dev, &match,
2105 CONST_CAST(struct nlattr *, put->actions),
2106 put->actions_len,
2107 CONST_CAST(ovs_u128 *, put->ufid),
2108 &info, put->stats);
2109
2110 if (!err) {
2111 if (put->flags & DPIF_FP_MODIFY) {
2112 struct dpif_op *opp;
2113 struct dpif_op op;
2114
2115 op.type = DPIF_OP_FLOW_DEL;
2116 op.flow_del.key = put->key;
2117 op.flow_del.key_len = put->key_len;
2118 op.flow_del.ufid = put->ufid;
2119 op.flow_del.pmd_id = put->pmd_id;
2120 op.flow_del.stats = NULL;
2121 op.flow_del.terse = false;
2122
2123 opp = &op;
2124 dpif_netlink_operate__(dpif, &opp, 1);
2125 }
2126
2127 VLOG_DBG("added flow");
2128 } else if (err != EEXIST) {
2129 struct netdev *oor_netdev = NULL;
2130 enum vlog_level level;
2131 if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
2132 /*
2133 * We need to set OOR on the input netdev (i.e, 'dev') for the
2134 * flow. But if the flow has a tunnel attribute (i.e, decap action,
2135 * with a virtual device like a VxLAN interface as its in-port),
2136 * then lookup and set OOR on the underlying tunnel (real) netdev.
2137 */
2138 oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
2139 if (!oor_netdev) {
2140 /* Not a 'tunnel' flow */
2141 oor_netdev = dev;
2142 }
2143 netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
2144 }
2145 level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
2146 VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
2147 ovs_strerror(err),
2148 (oor_netdev ? oor_netdev->name : dev->name));
2149 }
2150
2151 out:
2152 if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
2153 /* Modified rule can't be offloaded, try and delete from HW */
2154 int del_err = 0;
2155
2156 if (!info.tc_modify_flow_deleted) {
2157 del_err = netdev_flow_del(dev, put->ufid, put->stats);
2158 }
2159
2160 if (!del_err) {
2161 /* Delete from hw success, so old flow was offloaded.
2162 * Change flags to create the flow in kernel */
2163 put->flags &= ~DPIF_FP_MODIFY;
2164 put->flags |= DPIF_FP_CREATE;
2165 } else if (del_err != ENOENT) {
2166 VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
2167 ovs_strerror(del_err));
2168 /* stop proccesing the flow in kernel */
2169 err = 0;
2170 }
2171 }
2172
2173 netdev_close(dev);
2174
2175 return err;
2176 }
2177
2178 static int
2179 try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op)
2180 {
2181 int err = EOPNOTSUPP;
2182
2183 switch (op->type) {
2184 case DPIF_OP_FLOW_PUT: {
2185 struct dpif_flow_put *put = &op->flow_put;
2186
2187 if (!put->ufid) {
2188 break;
2189 }
2190
2191 err = parse_flow_put(dpif, put);
2192 log_flow_put_message(&dpif->dpif, &this_module, put, 0);
2193 break;
2194 }
2195 case DPIF_OP_FLOW_DEL: {
2196 struct dpif_flow_del *del = &op->flow_del;
2197
2198 if (!del->ufid) {
2199 break;
2200 }
2201
2202 err = netdev_ports_flow_del(
2203 dpif_normalize_type(dpif_type(&dpif->dpif)),
2204 del->ufid,
2205 del->stats);
2206 log_flow_del_message(&dpif->dpif, &this_module, del, 0);
2207 break;
2208 }
2209 case DPIF_OP_FLOW_GET: {
2210 struct dpif_flow_get *get = &op->flow_get;
2211
2212 if (!op->flow_get.ufid) {
2213 break;
2214 }
2215
2216 err = parse_flow_get(dpif, get);
2217 log_flow_get_message(&dpif->dpif, &this_module, get, 0);
2218 break;
2219 }
2220 case DPIF_OP_EXECUTE:
2221 default:
2222 break;
2223 }
2224
2225 return err;
2226 }
2227
/* Executes all 'n_ops' operations in 'ops' against the kernel datapath,
 * issuing as many netlink transaction batches as dpif_netlink_operate__()
 * requires. */
static void
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
                            size_t n_ops)
{
    for (size_t left = n_ops; left > 0; ) {
        size_t done = dpif_netlink_operate__(dpif, ops, left);

        ops += done;
        left -= done;
    }
}
2239
/* dpif 'operate' implementation.  Depending on 'offload_type', tries to
 * offload each op to a netdev (hardware) first and falls back to the kernel
 * datapath for the ops that could not be offloaded.  With
 * DPIF_OFFLOAD_ALWAYS there is no kernel fallback: the first offload failure
 * aborts the batch and stamps the same error on all remaining ops. */
static void
dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_op *new_ops[OPERATE_MAX_OPS];  /* Ops destined for kernel. */
    int count = 0;
    int i = 0;
    int err = 0;

    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        VLOG_DBG("Invalid offload_type: %d", offload_type);
        return;
    }

    if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
        /* Process in chunks of at most OPERATE_MAX_OPS: 'new_ops' collects
         * the ops that failed to offload so they can be retried in the
         * kernel datapath. */
        while (n_ops > 0) {
            count = 0;

            while (n_ops > 0 && count < OPERATE_MAX_OPS) {
                struct dpif_op *op = ops[i++];

                err = try_send_to_netdev(dpif, op);
                if (err && err != EEXIST) {
                    if (offload_type == DPIF_OFFLOAD_ALWAYS) {
                        /* We got an error while offloading an op. Since
                         * OFFLOAD_ALWAYS is specified, we stop further
                         * processing and return to the caller without
                         * invoking kernel datapath as fallback. But the
                         * interface requires us to process all n_ops; so
                         * return the same error in the remaining ops too.
                         */
                        op->error = err;
                        n_ops--;
                        while (n_ops > 0) {
                            op = ops[i++];
                            op->error = err;
                            n_ops--;
                        }
                        return;
                    }
                    /* Offload failed; queue for the kernel datapath. */
                    new_ops[count++] = op;
                } else {
                    /* Offloaded (or EEXIST): record the result directly. */
                    op->error = err;
                }

                n_ops--;
            }

            dpif_netlink_operate_chunks(dpif, new_ops, count);
        }
    } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
        /* Offload disabled or unavailable: everything goes to the kernel. */
        dpif_netlink_operate_chunks(dpif, ops, n_ops);
    }
}
2295
2296 #if _WIN32
2297 static void
2298 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2299 {
2300 vport_delete_sock_pool(handler);
2301 }
2302
2303 static int
2304 dpif_netlink_handler_init(struct dpif_handler *handler)
2305 {
2306 return vport_create_sock_pool(handler);
2307 }
2308 #else
2309
2310 static int
2311 dpif_netlink_handler_init(struct dpif_handler *handler)
2312 {
2313 handler->epoll_fd = epoll_create(10);
2314 return handler->epoll_fd < 0 ? errno : 0;
2315 }
2316
2317 static void
2318 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2319 {
2320 close(handler->epoll_fd);
2321 }
2322 #endif
2323
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * If 'n_handlers' differs from the current handler count, all channels are
 * torn down and the handler array is rebuilt first.  Returns 0 on success,
 * else a positive errno value (some channels may still have been
 * refreshed). */
static int
dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;  /* Bitmap of port_nos still in use. */
    struct dpif_netlink_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* Windows supports at most one handler (see the handler-init code). */
    ovs_assert(!WINDOWS || n_handlers <= 1);
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    if (dpif->n_handlers != n_handlers) {
        /* Handler count changed: rebuild the handler array from scratch. */
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            int error;
            struct dpif_handler *handler = &dpif->handlers[i];

            error = dpif_netlink_handler_init(handler);
            if (error) {
                size_t j;

                /* Unwind the handlers initialized so far. */
                for (j = 0; j < i; j++) {
                    struct dpif_handler *tmp = &dpif->handlers[j];
                    dpif_netlink_handler_uninit(tmp);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return error;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    /* Walk all vports currently known to the kernel datapath. */
    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_port_dump_start__(dpif, &dump);
    while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t upcall_pid;
        int error;

        if (port_no >= dpif->uc_array_size
            || !vport_get_pid(dpif, port_no, &upcall_pid)) {
            /* No channel for this port yet: create a socket for it. */
            struct nl_sock *sock;
            error = create_nl_sock(dpif, &sock);

            if (error) {
                goto error;
            }

            error = vport_add_channel(dpif, vport.port_no, sock);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                nl_sock_destroy(sock);
                retval = error;
                goto error;
            }
            upcall_pid = nl_sock_pid(sock);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != 1
            || upcall_pid != vport.upcall_pids[0]) {
            struct dpif_netlink_vport vport_request;

            dpif_netlink_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = 1;
            vport_request.upcall_pids = &upcall_pid;
            error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is.  Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        continue;

    error:
        /* Per-port failure: drop this port's channel and keep dumping. */
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
2454
2455 static int
2456 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
2457 OVS_REQ_WRLOCK(dpif->upcall_lock)
2458 {
2459 if ((dpif->handlers != NULL) == enable) {
2460 return 0;
2461 } else if (!enable) {
2462 destroy_all_channels(dpif);
2463 return 0;
2464 } else {
2465 return dpif_netlink_refresh_channels(dpif, 1);
2466 }
2467 }
2468
2469 static int
2470 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
2471 {
2472 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2473 int error;
2474
2475 fat_rwlock_wrlock(&dpif->upcall_lock);
2476 error = dpif_netlink_recv_set__(dpif, enable);
2477 fat_rwlock_unlock(&dpif->upcall_lock);
2478
2479 return error;
2480 }
2481
2482 static int
2483 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
2484 {
2485 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2486 int error = 0;
2487
2488 #ifdef _WIN32
2489 /* Multiple upcall handlers will be supported once kernel datapath supports
2490 * it. */
2491 if (n_handlers > 1) {
2492 return error;
2493 }
2494 #endif
2495
2496 fat_rwlock_wrlock(&dpif->upcall_lock);
2497 if (dpif->handlers) {
2498 error = dpif_netlink_refresh_channels(dpif, n_handlers);
2499 }
2500 fat_rwlock_unlock(&dpif->upcall_lock);
2501
2502 return error;
2503 }
2504
2505 static int
2506 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
2507 uint32_t queue_id, uint32_t *priority)
2508 {
2509 if (queue_id < 0xf000) {
2510 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
2511 return 0;
2512 } else {
2513 return EINVAL;
2514 }
2515 }
2516
/* Parses the OVS_PACKET netlink message in 'buf' into '*upcall' and stores
 * the datapath ifindex it was sent for in '*dp_ifindex'.  On success,
 * '*upcall' borrows memory from 'buf' (it is not copied), so 'buf' must
 * outlive '*upcall'.  Returns 0 on success, EINVAL if the message is
 * malformed or not an upcall we understand. */
static int
parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
                 int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
        [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true },
        [OVS_PACKET_ATTR_HASH] = { .type = NL_A_U64, .optional = true }
    };

    /* Peel off the netlink, genetlink, and OVS headers in turn. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
                : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
                : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    /* The kernel does not supply a UFID; derive one from the flow key. */
    odp_flow_key_hash(upcall->key, upcall->key_len, &upcall->ufid);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
    upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
    upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
    upcall->mru = a[OVS_PACKET_ATTR_MRU];
    upcall->hash = a[OVS_PACKET_ATTR_HASH];

    /* Allow overwriting the netlink attribute header without reallocating. */
    dp_packet_use_stub(&upcall->packet,
                       CONST_CAST(struct nlattr *,
                                  nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                       nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                       sizeof(struct nlattr));
    dp_packet_set_data(&upcall->packet,
                       (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
    dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
        /* Ethernet frame */
        upcall->packet.packet_type = htonl(PT_ETH);
    } else {
        /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
        ovs_be16 ethertype = 0;
        const struct nlattr *et_nla = nl_attr_find__(upcall->key,
                                                     upcall->key_len,
                                                     OVS_KEY_ATTR_ETHERTYPE);
        if (et_nla) {
            ethertype = nl_attr_get_be16(et_nla);
        }
        upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                    ntohs(ethertype));
        dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
    }

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
2598
#ifdef _WIN32
#define PACKET_RECV_BATCH_SIZE 50
/* Windows implementation of upcall reception: polls the handler's vport
 * socket pool for at most PACKET_RECV_BATCH_SIZE messages.  Returns 0 when
 * an upcall for this datapath was stored in '*upcall'/'buf', EAGAIN when
 * nothing is ready, or another positive errno value on error. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the work done per call so one busy socket cannot
             * starve the caller. */
            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed.  Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    break;      /* This socket is drained; try the next. */
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
            /* Upcall was for a different datapath: keep reading. */
        }
    }

    return EAGAIN;
}
2665 #else
static int
/* Non-Windows implementation of upcall reception for handler 'handler_id':
 * refills the handler's epoll event list if it is exhausted, then reads from
 * the ready channels.  Returns 0 when an upcall for this datapath was stored
 * in '*upcall'/'buf', EAGAIN when nothing is ready, or another positive
 * errno value on error. */
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
                    struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        handler->event_offset = handler->n_events = 0;

        /* Non-blocking poll (timeout 0); retry if interrupted. */
        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* Each epoll event's data.u32 is the index of the ready channel. */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the work done per call so one busy channel cannot
             * starve the caller. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed.  Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    break;      /* Channel drained; move to the next one. */
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
            /* Upcall was for a different datapath: keep reading. */
        }
    }

    return EAGAIN;
}
2740 #endif
2741
/* dpif 'recv' implementation: takes the upcall lock (shared) and delegates
 * to the platform-specific receive routine.  Returns 0 on success, EAGAIN
 * when no upcall is ready, or another positive errno value on error. */
static int
dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
                  struct dpif_upcall *upcall, struct ofpbuf *buf)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error;

    fat_rwlock_rdlock(&dpif->upcall_lock);
#ifdef _WIN32
    error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
#else
    error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
#endif
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
2759
2760 static void
2761 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2762 OVS_REQ_RDLOCK(dpif->upcall_lock)
2763 {
2764 #ifdef _WIN32
2765 uint32_t i;
2766 struct dpif_windows_vport_sock *sock_pool =
2767 dpif->handlers[handler_id].vport_sock_pool;
2768
2769 /* Only one handler is supported currently. */
2770 if (handler_id >= 1) {
2771 return;
2772 }
2773
2774 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2775 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2776 }
2777 #else
2778 if (dpif->handlers && handler_id < dpif->n_handlers) {
2779 struct dpif_handler *handler = &dpif->handlers[handler_id];
2780
2781 poll_fd_wait(handler->epoll_fd, POLLIN);
2782 }
2783 #endif
2784 }
2785
2786 static void
2787 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2788 {
2789 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2790
2791 fat_rwlock_rdlock(&dpif->upcall_lock);
2792 dpif_netlink_recv_wait__(dpif, handler_id);
2793 fat_rwlock_unlock(&dpif->upcall_lock);
2794 }
2795
2796 static void
2797 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2798 OVS_REQ_WRLOCK(dpif->upcall_lock)
2799 {
2800 if (dpif->handlers) {
2801 size_t i;
2802
2803 if (!dpif->channels[0].sock) {
2804 return;
2805 }
2806 for (i = 0; i < dpif->uc_array_size; i++ ) {
2807
2808 nl_sock_drain(dpif->channels[i].sock);
2809 }
2810 }
2811 }
2812
2813 static void
2814 dpif_netlink_recv_purge(struct dpif *dpif_)
2815 {
2816 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2817
2818 fat_rwlock_wrlock(&dpif->upcall_lock);
2819 dpif_netlink_recv_purge__(dpif);
2820 fat_rwlock_unlock(&dpif->upcall_lock);
2821 }
2822
/* Returns the kernel openvswitch module version read from sysfs as a
 * malloc'd string that the caller must free, or NULL if it cannot be
 * determined (non-Linux platform, or the sysfs file is unreadable). */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE  "/sys/module/openvswitch/version"
    FILE *f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (f) {
        char version[MAX_VERSION_STR_SIZE];

        if (fgets(version, sizeof version, f)) {
            /* Strip the trailing newline, if any. */
            version[strcspn(version, "\n")] = '\0';
            version_str = xstrdup(version);
        }
        fclose(f);
    }
#endif

    return version_str;
}
2852
/* State carried across dpif_netlink_ct_dump_{start,next,done}(), wrapping
 * the generic ct_dpif dump state around the netlink conntrack dump. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;        /* Embedded generic dump state. */
    struct nl_ct_dump_state *nl_ct_dump; /* Underlying netlink CT dump. */
};
2857
2858 static int
2859 dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
2860 struct ct_dpif_dump_state **dump_,
2861 const uint16_t *zone, int *ptot_bkts)
2862 {
2863 struct dpif_netlink_ct_dump_state *dump;
2864 int err;
2865
2866 dump = xzalloc(sizeof *dump);
2867 err = nl_ct_dump_start(&dump->nl_ct_dump, zone, ptot_bkts);
2868 if (err) {
2869 free(dump);
2870 return err;
2871 }
2872
2873 *dump_ = &dump->up;
2874
2875 return 0;
2876 }
2877
2878 static int
2879 dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
2880 struct ct_dpif_dump_state *dump_,
2881 struct ct_dpif_entry *entry)
2882 {
2883 struct dpif_netlink_ct_dump_state *dump;
2884
2885 INIT_CONTAINER(dump, dump_, up);
2886
2887 return nl_ct_dump_next(dump->nl_ct_dump, entry);
2888 }
2889
2890 static int
2891 dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
2892 struct ct_dpif_dump_state *dump_)
2893 {
2894 struct dpif_netlink_ct_dump_state *dump;
2895
2896 INIT_CONTAINER(dump, dump_, up);
2897
2898 int err = nl_ct_dump_done(dump->nl_ct_dump);
2899 free(dump);
2900 return err;
2901 }
2902
2903 static int
2904 dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone,
2905 const struct ct_dpif_tuple *tuple)
2906 {
2907 if (tuple) {
2908 return nl_ct_flush_tuple(tuple, zone ? *zone : 0);
2909 } else if (zone) {
2910 return nl_ct_flush_zone(*zone);
2911 } else {
2912 return nl_ct_flush();
2913 }
2914 }
2915
2916 static int
2917 dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED,
2918 const uint32_t *default_limits,
2919 const struct ovs_list *zone_limits)
2920 {
2921 struct ovs_zone_limit req_zone_limit;
2922
2923 if (ovs_ct_limit_family < 0) {
2924 return EOPNOTSUPP;
2925 }
2926
2927 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
2928 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
2929 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET,
2930 OVS_CT_LIMIT_VERSION);
2931
2932 struct ovs_header *ovs_header;
2933 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
2934 ovs_header->dp_ifindex = 0;
2935
2936 size_t opt_offset;
2937 opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
2938 if (default_limits) {
2939 req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
2940 req_zone_limit.limit = *default_limits;
2941 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2942 }
2943
2944 if (!ovs_list_is_empty(zone_limits)) {
2945 struct ct_dpif_zone_limit *zone_limit;
2946
2947 LIST_FOR_EACH (zone_limit, node, zone_limits) {
2948 req_zone_limit.zone_id = zone_limit->zone;
2949 req_zone_limit.limit = zone_limit->limit;
2950 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2951 }
2952 }
2953 nl_msg_end_nested(request, opt_offset);
2954
2955 int err = nl_transact(NETLINK_GENERIC, request, NULL);
2956 ofpbuf_delete(request);
2957 return err;
2958 }
2959
/* Parses an OVS_CT_LIMIT reply in 'buf'.  Stores the default zone's limit in
 * '*default_limit' (if present) and appends each valid per-zone limit to
 * 'zone_limits'.  Returns 0 on success, EINVAL on a malformed reply. */
static int
dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf,
                                     uint32_t *default_limit,
                                     struct ovs_list *zone_limits)
{
    static const struct nl_policy ovs_ct_limit_policy[] = {
        [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED,
                                           .optional = true },
    };

    /* Peel off the netlink, genetlink, and OVS headers in turn. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)];

    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_ct_limit_family
        || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr,
                            ARRAY_SIZE(ovs_ct_limit_policy))) {
        return EINVAL;
    }


    if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
        return EINVAL;
    }

    /* The nested attribute holds a packed array of 'struct ovs_zone_limit';
     * walk it entry by entry. */
    int rem = NLA_ALIGN(
        nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]));
    const struct ovs_zone_limit *zone_limit =
        nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]);

    while (rem >= sizeof *zone_limit) {
        if (zone_limit->zone_id == OVS_ZONE_LIMIT_DEFAULT_ZONE) {
            *default_limit = zone_limit->limit;
        } else if (zone_limit->zone_id < OVS_ZONE_LIMIT_DEFAULT_ZONE ||
                   zone_limit->zone_id > UINT16_MAX) {
            /* Out-of-range zone id: skip the entry, deliberately empty. */
        } else {
            ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id,
                                    zone_limit->limit, zone_limit->count);
        }
        rem -= NLA_ALIGN(sizeof *zone_limit);
        zone_limit = ALIGNED_CAST(struct ovs_zone_limit *,
            (unsigned char *) zone_limit + NLA_ALIGN(sizeof *zone_limit));
    }
    return 0;
}
3009
3010 static int
3011 dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED,
3012 uint32_t *default_limit,
3013 const struct ovs_list *zone_limits_request,
3014 struct ovs_list *zone_limits_reply)
3015 {
3016 if (ovs_ct_limit_family < 0) {
3017 return EOPNOTSUPP;
3018 }
3019
3020 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
3021 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
3022 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET,
3023 OVS_CT_LIMIT_VERSION);
3024
3025 struct ovs_header *ovs_header;
3026 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
3027 ovs_header->dp_ifindex = 0;
3028
3029 if (!ovs_list_is_empty(zone_limits_request)) {
3030 size_t opt_offset = nl_msg_start_nested(request,
3031 OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
3032
3033 struct ovs_zone_limit req_zone_limit;
3034 req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
3035 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
3036
3037 struct ct_dpif_zone_limit *zone_limit;
3038 LIST_FOR_EACH (zone_limit, node, zone_limits_request) {
3039 req_zone_limit.zone_id = zone_limit->zone;
3040 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
3041 }
3042
3043 nl_msg_end_nested(request, opt_offset);
3044 }
3045
3046 struct ofpbuf *reply;
3047 int err = nl_transact(NETLINK_GENERIC, request, &reply);
3048 if (err) {
3049 goto out;
3050 }
3051
3052 err = dpif_netlink_zone_limits_from_ofpbuf(reply, default_limit,
3053 zone_limits_reply);
3054
3055 out:
3056 ofpbuf_delete(request);
3057 ofpbuf_delete(reply);
3058 return err;
3059 }
3060
3061 static int
3062 dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED,
3063 const struct ovs_list *zone_limits)
3064 {
3065 if (ovs_ct_limit_family < 0) {
3066 return EOPNOTSUPP;
3067 }
3068
3069 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
3070 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
3071 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL,
3072 OVS_CT_LIMIT_VERSION);
3073
3074 struct ovs_header *ovs_header;
3075 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
3076 ovs_header->dp_ifindex = 0;
3077
3078 if (!ovs_list_is_empty(zone_limits)) {
3079 size_t opt_offset =
3080 nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
3081
3082 struct ct_dpif_zone_limit *zone_limit;
3083 LIST_FOR_EACH (zone_limit, node, zone_limits) {
3084 struct ovs_zone_limit req_zone_limit;
3085 req_zone_limit.zone_id = zone_limit->zone;
3086 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
3087 }
3088 nl_msg_end_nested(request, opt_offset);
3089 }
3090
3091 int err = nl_transact(NETLINK_GENERIC, request, NULL);
3092
3093 ofpbuf_delete(request);
3094 return err;
3095 }
3096
/* Prefix for kernel conntrack timeout-policy object names created by OVS. */
#define NL_TP_NAME_PREFIX "ovs_tp_"

/* An (address family, IP protocol) pair identifying one kernel conntrack
 * timeout-policy object. */
struct dpif_netlink_timeout_policy_protocol {
    uint16_t l3num;     /* Address family, e.g. AF_INET. */
    uint8_t l4num;      /* IP protocol number, e.g. IPPROTO_TCP. */
};

/* Indexes into tp_protos[] for the protocol combinations that support
 * timeout policies. */
enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
    DPIF_NL_TP_AF_INET_TCP,
    DPIF_NL_TP_AF_INET_UDP,
    DPIF_NL_TP_AF_INET_ICMP,
    DPIF_NL_TP_AF_INET6_TCP,
    DPIF_NL_TP_AF_INET6_UDP,
    DPIF_NL_TP_AF_INET6_ICMPV6,
    DPIF_NL_TP_MAX
};

/* Bitmask with one bit set for every entry of the enum above. */
#define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)


/* Table of the supported (l3, l4) protocol combinations, indexed by the
 * enum above. */
static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
    [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
    [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
                                     .l4num = IPPROTO_ICMPV6 },
};
3126
3127 static void
3128 dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
3129 char **tp_name)
3130 {
3131 struct ds ds = DS_EMPTY_INITIALIZER;
3132 ds_put_format(&ds, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
3133 ct_dpif_format_ipproto(&ds, l4num);
3134
3135 if (l3num == AF_INET) {
3136 ds_put_cstr(&ds, "4");
3137 } else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
3138 ds_put_cstr(&ds, "6");
3139 }
3140
3141 ovs_assert(ds.length < CTNL_TIMEOUT_NAME_MAX);
3142
3143 *tp_name = ds_steal_cstr(&ds);
3144 }
3145
3146 static int
3147 dpif_netlink_ct_get_timeout_policy_name(struct dpif *dpif OVS_UNUSED,
3148 uint32_t tp_id, uint16_t dl_type,
3149 uint8_t nw_proto, char **tp_name,
3150 bool *is_generic)
3151 {
3152 dpif_netlink_format_tp_name(tp_id,
3153 dl_type == ETH_TYPE_IP ? AF_INET : AF_INET6,
3154 nw_proto, tp_name);
3155 *is_generic = false;
3156 return 0;
3157 }
3158
/* Tables pairing each ct_dpif timeout attribute with the corresponding
 * kernel CTA_TIMEOUT_* attribute, one table per protocol.  Each entry
 * expands CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2), whose
 * definition is swapped below to generate the dpif->netlink and
 * netlink->dpif copy helpers from the same tables. */
#define CT_DPIF_NL_TP_TCP_MAPPINGS                              \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED)   \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT)     \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE)               \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS)        \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)

#define CT_DPIF_NL_TP_UDP_MAPPINGS                              \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED)          \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)

#define CT_DPIF_NL_TP_ICMP_MAPPINGS                             \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)

#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS                           \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)


/* dpif -> netlink direction: copies each attribute that is present in the
 * ct_dpif policy 'tp' into the netlink policy 'nl_tp'. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)     \
if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {  \
    nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2;      \
    nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] =              \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1];          \
}
3189
/* Per-protocol helpers that copy timeout attributes from a ct_dpif policy
 * into a netlink policy; the bodies are generated by expanding the mapping
 * tables with the dpif->netlink CT_DPIF_NL_TP_MAPPING defined above. */
static void
dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

static void
dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

static void
dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
                                  struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

static void
dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
                                    struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
3219
3220 static void
3221 dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp,
3222 uint8_t l4num, struct nl_ct_timeout_policy *nl_tp)
3223 {
3224 nl_tp->present = 0;
3225
3226 if (l4num == IPPROTO_TCP) {
3227 dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp);
3228 } else if (l4num == IPPROTO_UDP) {
3229 dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp);
3230 } else if (l4num == IPPROTO_ICMP) {
3231 dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp);
3232 } else if (l4num == IPPROTO_ICMPV6) {
3233 dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp);
3234 }
3235 }
3236
/* netlink -> dpif direction: copies each attribute present in the netlink
 * policy 'nl_tp' into the ct_dpif policy 'tp'.  If 'tp' already has a
 * conflicting value for an attribute, the existing value is kept and the
 * inconsistency is logged (rate-limited). */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)                 \
if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) {               \
    if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {          \
        if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] !=                \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) {                 \
            VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s "       \
                         "attribute %s=%"PRIu32" while %s=%"PRIu32,         \
                         nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2,       \
                         nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2],      \
                         "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1,                \
                         tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]);    \
        }                                                                   \
    } else {                                                                \
        tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1;             \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] =                     \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2];                   \
    }                                                                       \
}
3255
3256 static void
3257 dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
3258 struct ct_dpif_timeout_policy *tp)
3259 {
3260 CT_DPIF_NL_TP_TCP_MAPPINGS
3261 }
3262
3263 static void
3264 dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
3265 struct ct_dpif_timeout_policy *tp)
3266 {
3267 CT_DPIF_NL_TP_UDP_MAPPINGS
3268 }
3269
3270 static void
3271 dpif_netlink_set_ct_dpif_tp_icmp_attrs(
3272 const struct nl_ct_timeout_policy *nl_tp,
3273 struct ct_dpif_timeout_policy *tp)
3274 {
3275 CT_DPIF_NL_TP_ICMP_MAPPINGS
3276 }
3277
3278 static void
3279 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
3280 const struct nl_ct_timeout_policy *nl_tp,
3281 struct ct_dpif_timeout_policy *tp)
3282 {
3283 CT_DPIF_NL_TP_ICMPV6_MAPPINGS
3284 }
3285
3286 #undef CT_DPIF_NL_TP_MAPPING
3287
3288 static void
3289 dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp,
3290 struct ct_dpif_timeout_policy *tp)
3291 {
3292 if (nl_tp->l4num == IPPROTO_TCP) {
3293 dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp);
3294 } else if (nl_tp->l4num == IPPROTO_UDP) {
3295 dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp);
3296 } else if (nl_tp->l4num == IPPROTO_ICMP) {
3297 dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp);
3298 } else if (nl_tp->l4num == IPPROTO_ICMPV6) {
3299 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp);
3300 }
3301 }
3302
3303 #ifdef _WIN32
3304 static int
3305 dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
3306 const struct ct_dpif_timeout_policy *tp)
3307 {
3308 return EOPNOTSUPP;
3309 }
3310
3311 static int
3312 dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
3313 uint32_t tp_id,
3314 struct ct_dpif_timeout_policy *tp)
3315 {
3316 return EOPNOTSUPP;
3317 }
3318
3319 static int
3320 dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
3321 uint32_t tp_id)
3322 {
3323 return EOPNOTSUPP;
3324 }
3325
3326 static int
3327 dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
3328 void **statep)
3329 {
3330 return EOPNOTSUPP;
3331 }
3332
3333 static int
3334 dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
3335 void *state,
3336 struct ct_dpif_timeout_policy **tp)
3337 {
3338 return EOPNOTSUPP;
3339 }
3340
3341 static int
3342 dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
3343 void *state)
3344 {
3345 return EOPNOTSUPP;
3346 }
3347 #else
3348 static int
3349 dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
3350 const struct ct_dpif_timeout_policy *tp)
3351 {
3352 int err = 0;
3353
3354 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3355 struct nl_ct_timeout_policy nl_tp;
3356 char *nl_tp_name;
3357
3358 dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num,
3359 tp_protos[i].l4num, &nl_tp_name);
3360 ovs_strlcpy(nl_tp.name, nl_tp_name, sizeof nl_tp.name);
3361 free(nl_tp_name);
3362
3363 nl_tp.l3num = tp_protos[i].l3num;
3364 nl_tp.l4num = tp_protos[i].l4num;
3365 dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp);
3366 err = nl_ct_set_timeout_policy(&nl_tp);
3367 if (err) {
3368 VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)",
3369 nl_tp.name, ovs_strerror(err));
3370 goto out;
3371 }
3372 }
3373
3374 out:
3375 return err;
3376 }
3377
3378 static int
3379 dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
3380 uint32_t tp_id,
3381 struct ct_dpif_timeout_policy *tp)
3382 {
3383 int err = 0;
3384
3385 tp->id = tp_id;
3386 tp->present = 0;
3387 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3388 struct nl_ct_timeout_policy nl_tp;
3389 char *nl_tp_name;
3390
3391 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3392 tp_protos[i].l4num, &nl_tp_name);
3393 err = nl_ct_get_timeout_policy(nl_tp_name, &nl_tp);
3394
3395 if (err) {
3396 VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)",
3397 nl_tp_name, ovs_strerror(err));
3398 free(nl_tp_name);
3399 goto out;
3400 }
3401 free(nl_tp_name);
3402 dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp);
3403 }
3404
3405 out:
3406 return err;
3407 }
3408
3409 /* Returns 0 if all the sub timeout policies are deleted or not exist in the
3410 * kernel. Returns 1 if any sub timeout policy deletion failed. */
3411 static int
3412 dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
3413 uint32_t tp_id)
3414 {
3415 int ret = 0;
3416
3417 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3418 char *nl_tp_name;
3419 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3420 tp_protos[i].l4num, &nl_tp_name);
3421 int err = nl_ct_del_timeout_policy(nl_tp_name);
3422 if (err == ENOENT) {
3423 err = 0;
3424 }
3425 if (err) {
3426 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6);
3427 VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)",
3428 nl_tp_name, ovs_strerror(err));
3429 ret = 1;
3430 }
3431 free(nl_tp_name);
3432 }
3433
3434 return ret;
3435 }
3436
/* State carried across dpif_netlink_ct_timeout_policy_dump_{start,next,done}.
 * Kernel policies arrive one (l3, l4) piece at a time, so partially
 * assembled dpif policies are parked in 'tp_dump_map' until complete. */
struct dpif_netlink_ct_timeout_policy_dump_state {
    struct nl_ct_timeout_policy_dump_state *nl_dump_state;
    struct hmap tp_dump_map;    /* Contains "struct dpif_netlink_tp_dump_node"s
                                 * hashed by dpif timeout policy id. */
};

/* One partially (or fully) assembled dpif timeout policy in 'tp_dump_map'. */
struct dpif_netlink_tp_dump_node {
    struct hmap_node hmap_node;      /* node in tp_dump_map. */
    struct ct_dpif_timeout_policy *tp;
    uint32_t l3_l4_present;          /* Bitmap of (l3, l4) combinations merged
                                      * so far; complete at DPIF_NL_ALL_TP. */
};
3447
3448 static struct dpif_netlink_tp_dump_node *
3449 get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id,
3450 struct hmap *tp_dump_map)
3451 {
3452 struct dpif_netlink_tp_dump_node *tp_dump_node;
3453
3454 HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0),
3455 tp_dump_map) {
3456 if (tp_dump_node->tp->id == tp_id) {
3457 return tp_dump_node;
3458 }
3459 }
3460 return NULL;
3461 }
3462
3463 static void
3464 update_dpif_netlink_tp_dump_node(
3465 const struct nl_ct_timeout_policy *nl_tp,
3466 struct dpif_netlink_tp_dump_node *tp_dump_node)
3467 {
3468 dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp);
3469 for (int i = 0; i < DPIF_NL_TP_MAX; ++i) {
3470 if (nl_tp->l3num == tp_protos[i].l3num &&
3471 nl_tp->l4num == tp_protos[i].l4num) {
3472 tp_dump_node->l3_l4_present |= 1 << i;
3473 break;
3474 }
3475 }
3476 }
3477
3478 static int
3479 dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
3480 void **statep)
3481 {
3482 struct dpif_netlink_ct_timeout_policy_dump_state *dump_state;
3483
3484 *statep = dump_state = xzalloc(sizeof *dump_state);
3485 int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state);
3486 if (err) {
3487 free(dump_state);
3488 return err;
3489 }
3490 hmap_init(&dump_state->tp_dump_map);
3491 return 0;
3492 }
3493
3494 static void
3495 get_and_cleanup_tp_dump_node(struct hmap *hmap,
3496 struct dpif_netlink_tp_dump_node *tp_dump_node,
3497 struct ct_dpif_timeout_policy *tp)
3498 {
3499 hmap_remove(hmap, &tp_dump_node->hmap_node);
3500 *tp = *tp_dump_node->tp;
3501 free(tp_dump_node->tp);
3502 free(tp_dump_node);
3503 }
3504
/* Produces the next complete dpif timeout policy in '*tp'.  Returns 0 when
 * a policy was produced, EOF when the dump is exhausted, or another
 * positive errno value on error.  Kernel policies arrive one (l3, l4)
 * piece at a time; pieces are accumulated in the dump map until a dpif
 * policy is complete, and any still-incomplete policies are flushed out
 * one per call once the kernel dump reaches EOF. */
static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy *tp)
{
    struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
    struct dpif_netlink_tp_dump_node *tp_dump_node;
    int err;

    /* Dumps all the timeout policies in the kernel. */
    do {
        struct nl_ct_timeout_policy nl_tp;
        uint32_t tp_id;

        err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state,
                                             &nl_tp);
        if (err) {
            break;
        }

        /* We are only interested in OVS-installed timeout policies, whose
         * names carry the NL_TP_NAME_PREFIX followed by the policy id. */
        if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) {
            continue;
        }

        tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id(
                            tp_id, &dump_state->tp_dump_map);
        if (!tp_dump_node) {
            /* First piece seen for this policy id: start accumulating. */
            tp_dump_node = xzalloc(sizeof *tp_dump_node);
            tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp);
            tp_dump_node->tp->id = tp_id;
            hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node,
                        hash_int(tp_id, 0));
        }

        update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node);

        /* Returns one ct_dpif_timeout_policy if we gather all the L3/L4
         * sub-pieces. */
        if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) {
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            break;
        }
    } while (true);

    /* Dump the incomplete timeout policies, one per call, after the kernel
     * dump has ended. */
    if (err == EOF) {
        if (!hmap_is_empty(&dump_state->tp_dump_map)) {
            struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map);
            tp_dump_node = CONTAINER_OF(hmap_node,
                                        struct dpif_netlink_tp_dump_node,
                                        hmap_node);
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            return 0;
        }
    }

    return err;
}
3566
3567 static int
3568 dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
3569 void *state)
3570 {
3571 struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
3572 struct dpif_netlink_tp_dump_node *tp_dump_node;
3573
3574 int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state);
3575 HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) {
3576 free(tp_dump_node->tp);
3577 free(tp_dump_node);
3578 }
3579 hmap_destroy(&dump_state->tp_dump_map);
3580 free(dump_state);
3581 return err;
3582 }
3583 #endif
3584
3585 \f
3586 /* Meters */
3587
/* Set of meter flags supported by the kernel datapath. */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Meter support was introduced in Linux 4.15.  In some versions of
 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
 * when the meter was created, so all meters essentially had an id of
 * zero.  Check for that condition and disable meters on those kernels. */
static bool probe_broken_meters(struct dpif *);
3597
3598 static void
3599 dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf,
3600 void *stub, size_t size, uint32_t command)
3601 {
3602 ofpbuf_use_stub(buf, stub, size);
3603
3604 nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO,
3605 command, OVS_METER_VERSION);
3606
3607 struct ovs_header *ovs_header;
3608 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
3609 ovs_header->dp_ifindex = dpif->dp_ifindex;
3610 }
3611
3612 /* Execute meter 'request' in the kernel datapath. If the command
3613 * fails, returns a positive errno value. Otherwise, stores the reply
3614 * in '*replyp', parses the policy according to 'reply_policy' into the
3615 * array of Netlink attribute in 'a', and returns 0. On success, the
3616 * caller is responsible for calling ofpbuf_delete() on '*replyp'
3617 * ('replyp' will contain pointers into 'a'). */
3618 static int
3619 dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp,
3620 const struct nl_policy *reply_policy,
3621 struct nlattr **a, size_t size_a)
3622 {
3623 int error = nl_transact(NETLINK_GENERIC, request, replyp);
3624 ofpbuf_uninit(request);
3625
3626 if (error) {
3627 return error;
3628 }
3629
3630 struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg);
3631 struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl);
3632 struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp,
3633 sizeof *ovs_header);
3634 if (!nlmsg || !genl || !ovs_header
3635 || nlmsg->nlmsg_type != ovs_meter_family
3636 || !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) {
3637 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3638 VLOG_DBG_RL(&rl,
3639 "Kernel module response to meter tranaction is invalid");
3640 return EINVAL;
3641 }
3642 return 0;
3643 }
3644
3645 static void
3646 dpif_netlink_meter_get_features(const struct dpif *dpif_,
3647 struct ofputil_meter_features *features)
3648 {
3649 if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) {
3650 features = NULL;
3651 return;
3652 }
3653
3654 struct ofpbuf buf, *msg;
3655 uint64_t stub[1024 / 8];
3656
3657 static const struct nl_policy ovs_meter_features_policy[] = {
3658 [OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 },
3659 [OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 },
3660 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3661 };
3662 struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)];
3663
3664 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3665 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub,
3666 OVS_METER_CMD_FEATURES);
3667 if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a,
3668 ARRAY_SIZE(ovs_meter_features_policy))) {
3669 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3670 VLOG_INFO_RL(&rl,
3671 "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
3672 return;
3673 }
3674
3675 features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]);
3676 features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]);
3677
3678 /* Bands is a nested attribute of zero or more nested
3679 * band attributes. */
3680 if (a[OVS_METER_ATTR_BANDS]) {
3681 const struct nlattr *nla;
3682 size_t left;
3683
3684 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3685 const struct nlattr *band_nla;
3686 size_t band_left;
3687
3688 NL_NESTED_FOR_EACH (band_nla, band_left, nla) {
3689 if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) {
3690 if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) {
3691 switch (nl_attr_get_u32(band_nla)) {
3692 case OVS_METER_BAND_TYPE_DROP:
3693 features->band_types |= 1 << OFPMBT13_DROP;
3694 break;
3695 }
3696 }
3697 }
3698 }
3699 }
3700 }
3701 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
3702
3703 ofpbuf_delete(msg);
3704 }
3705
/* Adds or modifies meter 'meter_id' in the kernel datapath according to
 * 'config'.  Returns 0 on success, EBADF if 'config' uses flags the
 * datapath does not support, ENODEV for an unsupported band type, or
 * another positive errno value if the netlink transaction fails.  This
 * "__" variant skips the broken-meter probe; external callers should use
 * dpif_netlink_meter_set(). */
static int
dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id,
                         struct ofputil_meter_config *config)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct ofpbuf buf, *msg;
    uint64_t stub[1024 / 8];

    /* The kernel echoes back the id of the meter it created/modified. */
    static const struct nl_policy ovs_meter_set_response_policy[] = {
        [OVS_METER_ATTR_ID] = { .type = NL_A_U32 },
    };
    struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)];

    if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) {
        return EBADF; /* Unsupported flags set */
    }

    /* The kernel datapath implements only drop bands. */
    for (size_t i = 0; i < config->n_bands; i++) {
        switch (config->bands[i].type) {
        case OFPMBT13_DROP:
            break;
        default:
            return ENODEV; /* Unsupported band type */
        }
    }

    dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET);

    nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);

    if (config->flags & OFPMF13_KBPS) {
        nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS);
    }

    size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS);
    /* Bands */
    for (size_t i = 0; i < config->n_bands; ++i) {
        struct ofputil_meter_band * band = &config->bands[i];
        uint32_t band_type;

        size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC);

        switch (band->type) {
        case OFPMBT13_DROP:
            band_type = OVS_METER_BAND_TYPE_DROP;
            break;
        default:
            band_type = OVS_METER_BAND_TYPE_UNSPEC;
        }
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type);
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate);
        /* Without OFPMF13_BURST the burst size defaults to the rate. */
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST,
                       config->flags & OFPMF13_BURST ?
                       band->burst_size : band->rate);
        nl_msg_end_nested(&buf, band_offset);
    }
    nl_msg_end_nested(&buf, bands_offset);

    int error = dpif_netlink_meter_transact(&buf, &msg,
                                    ovs_meter_set_response_policy, a,
                                    ARRAY_SIZE(ovs_meter_set_response_policy));
    if (error) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
        return error;
    }

    /* Log (but tolerate) a kernel that echoes the wrong meter id; see the
     * probe_broken_meters() comment for the known-buggy kernel versions. */
    if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "Kernel returned a different meter id than requested");
    }
    ofpbuf_delete(msg);
    return 0;
}
3782
3783 static int
3784 dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id,
3785 struct ofputil_meter_config *config)
3786 {
3787 if (probe_broken_meters(dpif_)) {
3788 return ENOMEM;
3789 }
3790
3791 return dpif_netlink_meter_set__(dpif_, meter_id, config);
3792 }
3793
3794 /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
3795 * stored in 'stats', if it is not null. If 'command' is
3796 * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
3797 * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
3798 * simply retrieved. */
3799 static int
3800 dpif_netlink_meter_get_stats(const struct dpif *dpif_,
3801 ofproto_meter_id meter_id,
3802 struct ofputil_meter_stats *stats,
3803 uint16_t max_bands,
3804 enum ovs_meter_cmd command)
3805 {
3806 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3807 struct ofpbuf buf, *msg;
3808 uint64_t stub[1024 / 8];
3809
3810 static const struct nl_policy ovs_meter_stats_policy[] = {
3811 [OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true},
3812 [OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
3813 .optional = true},
3814 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3815 };
3816 struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)];
3817
3818 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command);
3819
3820 nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
3821
3822 int error = dpif_netlink_meter_transact(&buf, &msg,
3823 ovs_meter_stats_policy, a,
3824 ARRAY_SIZE(ovs_meter_stats_policy));
3825 if (error) {
3826 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3827 VLOG_INFO_RL(&rl, "dpif_netlink_meter_transact %s failed",
3828 command == OVS_METER_CMD_GET ? "get" : "del");
3829 return error;
3830 }
3831
3832 if (stats
3833 && a[OVS_METER_ATTR_ID]
3834 && a[OVS_METER_ATTR_STATS]
3835 && nl_attr_get_u32(a[OVS_METER_ATTR_ID]) == meter_id.uint32) {
3836 /* return stats */
3837 const struct ovs_flow_stats *stat;
3838 const struct nlattr *nla;
3839 size_t left;
3840
3841 stat = nl_attr_get(a[OVS_METER_ATTR_STATS]);
3842 stats->packet_in_count = get_32aligned_u64(&stat->n_packets);
3843 stats->byte_in_count = get_32aligned_u64(&stat->n_bytes);
3844
3845 if (a[OVS_METER_ATTR_BANDS]) {
3846 size_t n_bands = 0;
3847 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3848 const struct nlattr *band_nla;
3849 band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS);
3850 if (band_nla && nl_attr_get_size(band_nla) \
3851 == sizeof(struct ovs_flow_stats)) {
3852 stat = nl_attr_get(band_nla);
3853
3854 if (n_bands < max_bands) {
3855 stats->bands[n_bands].packet_count
3856 = get_32aligned_u64(&stat->n_packets);
3857 stats->bands[n_bands].byte_count
3858 = get_32aligned_u64(&stat->n_bytes);
3859 ++n_bands;
3860 }
3861 } else {
3862 stats->bands[n_bands].packet_count = 0;
3863 stats->bands[n_bands].byte_count = 0;
3864 ++n_bands;
3865 }
3866 }
3867 stats->n_bands = n_bands;
3868 } else {
3869 /* For a non-existent meter, return 0 stats. */
3870 stats->n_bands = 0;
3871 }
3872 }
3873
3874 ofpbuf_delete(msg);
3875 return error;
3876 }
3877
3878 static int
3879 dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
3880 struct ofputil_meter_stats *stats, uint16_t max_bands)
3881 {
3882 return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
3883 OVS_METER_CMD_GET);
3884 }
3885
3886 static int
3887 dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
3888 struct ofputil_meter_stats *stats, uint16_t max_bands)
3889 {
3890 return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
3891 OVS_METER_CMD_DEL);
3892 }
3893
/* Probes the kernel for the meter-id bug (see the comment above
 * probe_broken_meters()).  Returns true if the kernel's meter
 * implementation is broken, false if it works. */
static bool
probe_broken_meters__(struct dpif *dpif)
{
    /* This test is destructive if a probe occurs while ovs-vswitchd is
     * running (e.g., an ovs-dpctl meter command is called), so choose a
     * random high meter id to make this less likely to occur. */
    ofproto_meter_id id1 = { 54545401 };
    ofproto_meter_id id2 = { 54545402 };
    struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
    struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
    struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};

    /* Try adding two meters and make sure that they both come back with
     * the proper meter id.  Use the "__" version so that we don't cause
     * a recursive deadlock. */
    dpif_netlink_meter_set__(dpif, id1, &config1);
    dpif_netlink_meter_set__(dpif, id2, &config2);

    /* On buggy kernels both meters were created with id 0, so looking
     * them up by their requested ids fails. */
    if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
        || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
        VLOG_INFO("The kernel module has a broken meter implementation.");
        return true;
    }

    /* Clean up the probe meters on working kernels. */
    dpif_netlink_meter_del(dpif, id1, NULL, 0);
    dpif_netlink_meter_del(dpif, id2, NULL, 0);

    return false;
}
3923
3924 static bool
3925 probe_broken_meters(struct dpif *dpif)
3926 {
3927 /* This is a once-only test because currently OVS only has at most a single
3928 * Netlink capable datapath on any given platform. */
3929 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3930
3931 static bool broken_meters = false;
3932 if (ovsthread_once_start(&once)) {
3933 broken_meters = probe_broken_meters__(dpif);
3934 ovsthread_once_done(&once);
3935 }
3936 return broken_meters;
3937 }
3938 \f
/* Datapath interface class for the Linux kernel ("system") datapath.  The
 * initializer is positional: each entry corresponds, in order, to a member
 * of struct dpif_class (see dpif-provider.h); NULL marks an operation this
 * dpif does not implement. */
const struct dpif_class dpif_netlink_class = {
    "system",
    false,                      /* cleanup_required */
    NULL,                       /* init */
    dpif_netlink_enumerate,
    NULL,
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                       /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_set_features,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                       /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    NULL,                       /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                       /* register_dp_purge_cb */
    NULL,                       /* register_upcall_cb */
    NULL,                       /* enable_upcall */
    NULL,                       /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    dpif_netlink_ct_flush,
    NULL,                       /* ct_set_maxconns */
    NULL,                       /* ct_get_maxconns */
    NULL,                       /* ct_get_nconns */
    NULL,                       /* ct_set_tcp_seq_chk */
    NULL,                       /* ct_get_tcp_seq_chk */
    dpif_netlink_ct_set_limits,
    dpif_netlink_ct_get_limits,
    dpif_netlink_ct_del_limits,
    dpif_netlink_ct_set_timeout_policy,
    dpif_netlink_ct_get_timeout_policy,
    dpif_netlink_ct_del_timeout_policy,
    dpif_netlink_ct_timeout_policy_dump_start,
    dpif_netlink_ct_timeout_policy_dump_next,
    dpif_netlink_ct_timeout_policy_dump_done,
    dpif_netlink_ct_get_timeout_policy_name,
    NULL,                       /* ipf_set_enabled */
    NULL,                       /* ipf_set_min_frag */
    NULL,                       /* ipf_set_max_nfrags */
    NULL,                       /* ipf_get_status */
    NULL,                       /* ipf_dump_start */
    NULL,                       /* ipf_dump_next */
    NULL,                       /* ipf_dump_done */
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
    NULL,                       /* bond_add */
    NULL,                       /* bond_del */
    NULL,                       /* bond_stats_get */
};
4016
4017 static int
4018 dpif_netlink_init(void)
4019 {
4020 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
4021 static int error;
4022
4023 if (ovsthread_once_start(&once)) {
4024 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
4025 &ovs_datapath_family);
4026 if (error) {
4027 VLOG_INFO("Generic Netlink family '%s' does not exist. "
4028 "The Open vSwitch kernel module is probably not loaded.",
4029 OVS_DATAPATH_FAMILY);
4030 }
4031 if (!error) {
4032 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
4033 }
4034 if (!error) {
4035 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
4036 }
4037 if (!error) {
4038 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
4039 &ovs_packet_family);
4040 }
4041 if (!error) {
4042 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
4043 &ovs_vport_mcgroup);
4044 }
4045 if (!error) {
4046 if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) {
4047 VLOG_INFO("The kernel module does not support meters.");
4048 }
4049 }
4050 if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY,
4051 &ovs_ct_limit_family) < 0) {
4052 VLOG_INFO("Generic Netlink family '%s' does not exist. "
4053 "Please update the Open vSwitch kernel module to enable "
4054 "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY);
4055 }
4056
4057 ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();
4058
4059 ovsthread_once_done(&once);
4060 }
4061
4062 return error;
4063 }
4064
/* Returns true if the kernel vport named 'name' exists and is an internal
 * device, false otherwise.  Unexpected lookup failures (other than the
 * device simply not existing) are logged. */
bool
dpif_netlink_is_internal_device(const char *name)
{
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_vport_get(name, &reply, &buf);
    if (!error) {
        ofpbuf_delete(buf);
    } else if (error != ENODEV && error != ENOENT) {
        VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
                     name, ovs_strerror(error));
    }

    /* On failure dpif_netlink_vport_transact() zeroed 'reply', so
     * 'reply.type' is OVS_VPORT_TYPE_UNSPEC and we return false. */
    return reply.type == OVS_VPORT_TYPE_INTERNAL;
}
4082
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'vport'.  Returns 0 if successful, otherwise a
 * positive errno value (EINVAL for a truncated or malformed message).
 *
 * 'vport' will contain pointers into 'buf', so the caller should not free
 * 'buf' while 'vport' is still in use. */
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NL_A_U32, .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Work on a shallow copy so 'buf' itself is left untouched. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* The attribute payload is an array of 32-bit pids; derive the
         * element count from the payload size. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);

    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    if (a[OVS_VPORT_ATTR_NETNSID]) {
        netnsid_set(&vport->netnsid,
                    nl_attr_get_u32(a[OVS_VPORT_ATTR_NETNSID]));
    } else {
        /* No NETNSID attribute means the vport is in the local netns. */
        netnsid_set_local(&vport->netnsid);
    }
    return 0;
}
4145
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'vport'.  Fields left at
 * their dpif_netlink_vport_init() defaults (ODPP_NONE port, UNSPEC type,
 * null pointers) are omitted from the message.  Note that 'netnsid' is
 * never serialized; it is a reply-only attribute. */
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
4188
/* Clears 'vport' to "empty" values: all fields zeroed, except that the port
 * number is set to ODPP_NONE so that "no port" is distinguishable from
 * port 0. */
void
dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
{
    memset(vport, 0, sizeof *vport);
    vport->port_no = ODPP_NONE;
}
4196
/* Executes 'request' in the kernel datapath.  If the command fails, returns a
 * positive errno value.  Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else.  If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be an ovs_vport also, which is decoded
 * and stored in '*reply' and '*bufp'.  The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp').
 * On any failure '*reply' is cleared and '*bufp' is set to NULL. */
int
dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
                            struct dpif_netlink_vport *reply,
                            struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    /* Make sure the Generic Netlink families have been looked up. */
    error = dpif_netlink_init();
    if (error) {
        if (reply) {
            *bufp = NULL;
            dpif_netlink_vport_init(reply);
        }
        return error;
    }

    request_buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        if (!error) {
            error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            /* Leave 'reply' zeroed rather than partially filled. */
            dpif_netlink_vport_init(reply);
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
4239
4240 /* Obtains information about the kernel vport named 'name' and stores it into
4241 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
4242 * longer needed ('reply' will contain pointers into '*bufp'). */
4243 int
4244 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
4245 struct ofpbuf **bufp)
4246 {
4247 struct dpif_netlink_vport request;
4248
4249 dpif_netlink_vport_init(&request);
4250 request.cmd = OVS_VPORT_CMD_GET;
4251 request.name = name;
4252
4253 return dpif_netlink_vport_transact(&request, reply, bufp);
4254 }
4255
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'dp'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'dp' is still in use. */
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
{
    /* Attribute policy: only OVS_DP_ATTR_NAME is mandatory; the stats and
     * user-features attributes are optional (e.g. absent on older kernels). */
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                        NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                        .optional = true },
        [OVS_DP_ATTR_USER_FEATURES] = {
                        .type = NL_A_U32,
                        .optional = true },
    };

    /* Start from a clean slate so that missing optional attributes leave
     * zeroed fields behind. */
    dpif_netlink_dp_init(dp);

    /* Peel off the fixed-size headers in wire order: nlmsghdr, genlmsghdr,
     * ovs_header.  Each pull returns NULL if 'buf' is too short. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    /* Reject truncated messages, messages for some other Generic Netlink
     * family, and attribute streams that violate the policy. */
    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    /* Copy out scalars and keep pointers into 'buf' for attribute payloads
     * (see ownership note in the function comment). */
    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    if (a[OVS_DP_ATTR_USER_FEATURES]) {
        dp->user_features = nl_attr_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
    }

    return 0;
}
4309
4310 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
4311 static void
4312 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
4313 {
4314 struct ovs_header *ovs_header;
4315
4316 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
4317 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
4318 OVS_DATAPATH_VERSION);
4319
4320 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4321 ovs_header->dp_ifindex = dp->dp_ifindex;
4322
4323 if (dp->name) {
4324 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
4325 }
4326
4327 if (dp->upcall_pid) {
4328 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
4329 }
4330
4331 if (dp->user_features) {
4332 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
4333 }
4334
4335 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
4336 }
4337
/* Clears 'dp' to "empty" values. */
static void
dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
{
    /* Zero every member (pointers become NULL, scalars 0) so callers can
     * fill in only the fields relevant to a particular request. */
    memset(dp, 0, sizeof *dp);
}
4344
4345 static void
4346 dpif_netlink_dp_dump_start(struct nl_dump *dump)
4347 {
4348 struct dpif_netlink_dp request;
4349 struct ofpbuf *buf;
4350
4351 dpif_netlink_dp_init(&request);
4352 request.cmd = OVS_DP_CMD_GET;
4353
4354 buf = ofpbuf_new(1024);
4355 dpif_netlink_dp_to_ofpbuf(&request, buf);
4356 nl_dump_start(dump, NETLINK_GENERIC, buf);
4357 ofpbuf_delete(buf);
4358 }
4359
4360 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4361 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4362 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4363 * result of the command is expected to be of the same form, which is decoded
4364 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
4365 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
4366 static int
4367 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
4368 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
4369 {
4370 struct ofpbuf *request_buf;
4371 int error;
4372
4373 ovs_assert((reply != NULL) == (bufp != NULL));
4374
4375 request_buf = ofpbuf_new(1024);
4376 dpif_netlink_dp_to_ofpbuf(request, request_buf);
4377 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4378 ofpbuf_delete(request_buf);
4379
4380 if (reply) {
4381 dpif_netlink_dp_init(reply);
4382 if (!error) {
4383 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
4384 }
4385 if (error) {
4386 ofpbuf_delete(*bufp);
4387 *bufp = NULL;
4388 }
4389 }
4390 return error;
4391 }
4392
4393 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
4394 * The caller must free '*bufp' when the reply is no longer needed ('reply'
4395 * will contain pointers into '*bufp'). */
4396 static int
4397 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
4398 struct ofpbuf **bufp)
4399 {
4400 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
4401 struct dpif_netlink_dp request;
4402
4403 dpif_netlink_dp_init(&request);
4404 request.cmd = OVS_DP_CMD_GET;
4405 request.dp_ifindex = dpif->dp_ifindex;
4406
4407 return dpif_netlink_dp_transact(&request, reply, bufp);
4408 }
4409
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'flow'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'flow' is still in use. */
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    /* Every attribute is individually optional here; the cross-attribute
     * requirement (a key or a UFID must be present) is enforced below. */
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    /* Start from a clean slate so that absent optional attributes leave
     * zeroed fields behind. */
    dpif_netlink_flow_init(flow);

    /* Peel off the fixed-size headers in wire order: nlmsghdr, genlmsghdr,
     * ovs_header.  Each pull returns NULL if 'buf' is too short. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    /* Reject truncated messages, messages for some other Generic Netlink
     * family, and attribute streams that violate the policy. */
    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A flow must be identifiable by at least one of its key or its UFID. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    /* Copy out scalars and keep pointers into 'buf' for attribute payloads
     * (see ownership note in the function comment). */
    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
4482
4483
/*
 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
 * OVS_KEY_ATTR_ETHERTYPE.  Puts 'data' to 'buf' as a nested attribute of
 * type 'type'.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* Split 'data' into the bytes before and after the PACKET_TYPE
         * attribute; copying both chunks drops it from the output. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
                                   - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: translate the packet type's namespace type
             * into an ETHERTYPE attribute instead. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            /* If an ETHERTYPE attribute already made it into 'buf',
             * overwrite its value in place; otherwise append one. */
            nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        /* No PACKET_TYPE present: copy 'data' through unchanged. */
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
4529
4530 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
4531 * followed by Netlink attributes corresponding to 'flow'. */
4532 static void
4533 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
4534 struct ofpbuf *buf)
4535 {
4536 struct ovs_header *ovs_header;
4537
4538 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
4539 NLM_F_REQUEST | flow->nlmsg_flags,
4540 flow->cmd, OVS_FLOW_VERSION);
4541
4542 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4543 ovs_header->dp_ifindex = flow->dp_ifindex;
4544
4545 if (flow->ufid_present) {
4546 nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid);
4547 }
4548 if (flow->ufid_terse) {
4549 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
4550 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
4551 | OVS_UFID_F_OMIT_ACTIONS);
4552 }
4553 if (!flow->ufid_terse || !flow->ufid_present) {
4554 if (flow->key_len) {
4555 put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key,
4556 flow->key_len);
4557 }
4558 if (flow->mask_len) {
4559 put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask,
4560 flow->mask_len);
4561 }
4562 if (flow->actions || flow->actions_len) {
4563 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
4564 flow->actions, flow->actions_len);
4565 }
4566 }
4567
4568 /* We never need to send these to the kernel. */
4569 ovs_assert(!flow->stats);
4570 ovs_assert(!flow->tcp_flags);
4571 ovs_assert(!flow->used);
4572
4573 if (flow->clear) {
4574 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
4575 }
4576 if (flow->probe) {
4577 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
4578 }
4579 }
4580
/* Clears 'flow' to "empty" values. */
static void
dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
{
    /* Zero every member (pointers become NULL, lengths 0, flags false) so
     * callers can fill in only the fields relevant to a request. */
    memset(flow, 0, sizeof *flow);
}
4587
4588 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4589 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4590 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4591 * result of the command is expected to be a flow also, which is decoded and
4592 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
4593 * is no longer needed ('reply' will contain pointers into '*bufp'). */
4594 static int
4595 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
4596 struct dpif_netlink_flow *reply,
4597 struct ofpbuf **bufp)
4598 {
4599 struct ofpbuf *request_buf;
4600 int error;
4601
4602 ovs_assert((reply != NULL) == (bufp != NULL));
4603
4604 if (reply) {
4605 request->nlmsg_flags |= NLM_F_ECHO;
4606 }
4607
4608 request_buf = ofpbuf_new(1024);
4609 dpif_netlink_flow_to_ofpbuf(request, request_buf);
4610 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4611 ofpbuf_delete(request_buf);
4612
4613 if (reply) {
4614 if (!error) {
4615 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
4616 }
4617 if (error) {
4618 dpif_netlink_flow_init(reply);
4619 ofpbuf_delete(*bufp);
4620 *bufp = NULL;
4621 }
4622 }
4623 return error;
4624 }
4625
4626 static void
4627 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
4628 struct dpif_flow_stats *stats)
4629 {
4630 if (flow->stats) {
4631 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
4632 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
4633 } else {
4634 stats->n_packets = 0;
4635 stats->n_bytes = 0;
4636 }
4637 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
4638 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
4639 }
4640
4641 /* Logs information about a packet that was recently lost in 'ch' (in
4642 * 'dpif_'). */
4643 static void
4644 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
4645 uint32_t handler_id)
4646 {
4647 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
4648 struct ds s;
4649
4650 if (VLOG_DROP_WARN(&rl)) {
4651 return;
4652 }
4653
4654 ds_init(&s);
4655 if (ch->last_poll != LLONG_MIN) {
4656 ds_put_format(&s, " (last polled %lld ms ago)",
4657 time_msec() - ch->last_poll);
4658 }
4659
4660 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
4661 dpif_name(&dpif->dpif), ch_idx, handler_id);
4662 ds_destroy(&s);
4663 }