]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif-netlink.c
eee6f3206e3f717a89d66f78f764265f9e5a80e7
[mirror_ovs.git] / lib / dpif-netlink.c
1 /*
2 * Copyright (c) 2008-2018 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dpif-netlink.h"
20
21 #include <ctype.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <net/if.h>
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
28 #include <poll.h>
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <sys/epoll.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34
35 #include "bitmap.h"
36 #include "dpif-netlink-rtnl.h"
37 #include "dpif-provider.h"
38 #include "fat-rwlock.h"
39 #include "flow.h"
40 #include "netdev-linux.h"
41 #include "netdev-offload.h"
42 #include "netdev-provider.h"
43 #include "netdev-vport.h"
44 #include "netdev.h"
45 #include "netlink-conntrack.h"
46 #include "netlink-notifier.h"
47 #include "netlink-socket.h"
48 #include "netlink.h"
49 #include "netnsid.h"
50 #include "odp-util.h"
51 #include "openvswitch/dynamic-string.h"
52 #include "openvswitch/flow.h"
53 #include "openvswitch/hmap.h"
54 #include "openvswitch/match.h"
55 #include "openvswitch/ofpbuf.h"
56 #include "openvswitch/poll-loop.h"
57 #include "openvswitch/shash.h"
58 #include "openvswitch/thread.h"
59 #include "openvswitch/vlog.h"
60 #include "packets.h"
61 #include "random.h"
62 #include "sset.h"
63 #include "timeval.h"
64 #include "unaligned.h"
65 #include "util.h"
66
67 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
68 #ifdef _WIN32
69 #include "wmi.h"
70 enum { WINDOWS = 1 };
71 #else
72 enum { WINDOWS = 0 };
73 #endif
74 enum { MAX_PORTS = USHRT_MAX };
75
76 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
77 * missing if we have old headers. */
78 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
79
80 #define FLOW_DUMP_MAX_BATCH 50
81 #define OPERATE_MAX_OPS 50
82
83 #ifndef EPOLLEXCLUSIVE
84 #define EPOLLEXCLUSIVE (1u << 28)
85 #endif
86
/* In-memory form of an OVS_DP_* Generic Netlink request or reply: the
 * command, the ovs_header contents, and pointers into the message for
 * whichever attributes are present (null/zero when absent). */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;                       /* One of OVS_DP_CMD_*. */

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
};
102
103 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
104 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
105 const struct ofpbuf *);
106 static void dpif_netlink_dp_dump_start(struct nl_dump *);
107 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
108 struct dpif_netlink_dp *reply,
109 struct ofpbuf **bufp);
110 static int dpif_netlink_dp_get(const struct dpif *,
111 struct dpif_netlink_dp *reply,
112 struct ofpbuf **bufp);
113
/* In-memory form of an OVS_FLOW_* Generic Netlink request or reply:
 * command, Netlink message flags, ovs_header contents, and pointers to the
 * attributes present in the message. */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;                       /* One of OVS_FLOW_CMD_*. */

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
145
146 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
147 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
148 const struct ofpbuf *);
149 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
150 struct ofpbuf *);
151 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
152 struct dpif_netlink_flow *reply,
153 struct ofpbuf **bufp);
154 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
155 struct dpif_flow_stats *);
156 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
157 const struct dpif_netlink_flow *);
158
/* One of the dpif channels between the kernel and userspace.  Each in-use
 * datapath port owns one channel, indexed by port number. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket carrying upcalls. */
    long long int last_poll;    /* Last time this channel was polled;
                                 * LLONG_MIN until first polled. */
};
164
#ifdef _WIN32
#define VPORT_SOCK_POOL_SIZE 1
/* On Windows, there is no native support for epoll.  There are equivalent
 * interfaces though, that are not used currently.  For simplicity, a pool of
 * netlink sockets is used.  Each socket is represented by 'struct
 * dpif_windows_vport_sock'.  Since it is a pool, multiple OVS ports may be
 * sharing the same socket.  In the future, we can add a reference count and
 * such fields. */
struct dpif_windows_vport_sock {
    struct nl_sock *nl_sock;    /* netlink socket. */
};
#endif
177
/* Per-upcall-handler-thread state: an epoll set over the channel sockets
 * plus the batch of events most recently returned by epoll_wait(). */
struct dpif_handler {
    struct epoll_event *epoll_events;  /* Buffer for epoll_wait() results. */
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
191
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_netlink {
    struct dpif dpif;          /* Base class; must be first member. */
    int dp_ifindex;            /* Kernel datapath ifindex. */

    /* Upcall messages. */
    struct fat_rwlock upcall_lock;  /* Guards all the fields below. */
    struct dpif_handler *handlers;  /* Array of 'n_handlers' handlers. */
    uint32_t n_handlers;            /* Num of upcall handlers. */
    struct dpif_channel *channels;  /* Array of channels for each port. */
    int uc_array_size;              /* Size of 'handler->channels' and */
                                    /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier;  /* vport multicast group subscriber. */
    bool refresh_channels;          /* Set to re-create channels on next
                                     * dpif_netlink_run(). */
};
209
210 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
211 uint32_t ch_idx, uint32_t handler_id);
212
213 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
214
215 /* Generic Netlink family numbers for OVS.
216 *
217 * Initialized by dpif_netlink_init(). */
218 static int ovs_datapath_family;
219 static int ovs_vport_family;
220 static int ovs_flow_family;
221 static int ovs_packet_family;
222 static int ovs_meter_family;
223 static int ovs_ct_limit_family;
224
225 /* Generic Netlink multicast groups for OVS.
226 *
227 * Initialized by dpif_netlink_init(). */
228 static unsigned int ovs_vport_mcgroup;
229
230 /* If true, tunnel devices are created using OVS compat/genetlink.
231 * If false, tunnel devices are created with rtnetlink and using light weight
232 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
233 * to using the compat interface. */
234 static bool ovs_tunnels_out_of_tree = true;
235
236 static int dpif_netlink_init(void);
237 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
238 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
239 odp_port_t port_no);
240 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
241 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
242 uint32_t n_handlers);
243 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
244 struct ofpbuf *);
245 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
246 const struct ofpbuf *);
247 static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
248 odp_port_t port_no, const char *port_name,
249 struct dpif_port *dpif_port);
250
/* Obtains a Netlink socket for use as an upcall channel and stores it in
 * '*socksp'.  On Linux a fresh NETLINK_GENERIC socket is created per call;
 * on Windows sockets are handed out round-robin from the first handler's
 * pre-allocated pool, so multiple ports may share one socket.  Returns 0 on
 * success, a positive errno value on failure. */
static int
create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **socksp)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
#ifndef _WIN32
    return nl_sock_create(NETLINK_GENERIC, socksp);
#else
    /* Pick netlink sockets to use in a round-robin fashion from each
     * handler's pool of sockets. */
    struct dpif_handler *handler = &dpif->handlers[0];
    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
    size_t index = handler->last_used_pool_idx;

    /* A pool of sockets is allocated when the handler is initialized. */
    if (sock_pool == NULL) {
        *socksp = NULL;
        return EINVAL;
    }

    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
    *socksp = sock_pool[index].nl_sock;
    ovs_assert(*socksp);
    /* Advance the round-robin cursor, wrapping at the end of the pool. */
    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
    handler->last_used_pool_idx = index;
    return 0;
#endif
}
278
/* Counterpart to create_nl_sock().  On Windows this is a no-op because the
 * sockets belong to the per-handler pool and must outlive any single port.
 * nl_sock_destroy() tolerates a null 'socksp'. */
static void
close_nl_sock(struct nl_sock *socksp)
{
#ifndef _WIN32
    nl_sock_destroy(socksp);
#endif
}
286
/* Downcasts 'dpif' to its dpif-netlink representation, asserting that it
 * really is an instance of dpif_netlink_class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
293
294 static int
295 dpif_netlink_enumerate(struct sset *all_dps,
296 const struct dpif_class *dpif_class OVS_UNUSED)
297 {
298 struct nl_dump dump;
299 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
300 struct ofpbuf msg, buf;
301 int error;
302
303 error = dpif_netlink_init();
304 if (error) {
305 return error;
306 }
307
308 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
309 dpif_netlink_dp_dump_start(&dump);
310 while (nl_dump_next(&dump, &msg, &buf)) {
311 struct dpif_netlink_dp dp;
312
313 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
314 sset_add(all_dps, dp.name);
315 }
316 }
317 ofpbuf_uninit(&buf);
318 return nl_dump_done(&dump);
319 }
320
/* Opens the kernel datapath named 'name', creating it first if 'create' is
 * true, and stores a new dpif for it in '*dpifp'.  Even when only opening
 * an existing datapath, OVS_DP_CMD_SET is issued so the kernel learns which
 * user features this userspace supports (unaligned Netlink attributes and
 * per-vport upcall PIDs).  Returns 0 on success, a positive errno value on
 * failure. */
static int
dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
                  bool create, struct dpif **dpifp)
{
    struct dpif_netlink_dp dp_request, dp;
    struct ofpbuf *buf;
    uint32_t upcall_pid;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    /* Create or look up datapath. */
    dpif_netlink_dp_init(&dp_request);
    if (create) {
        dp_request.cmd = OVS_DP_CMD_NEW;
        /* PID 0 disables upcall delivery until channels are set up. */
        upcall_pid = 0;
        dp_request.upcall_pid = &upcall_pid;
    } else {
        /* Use OVS_DP_CMD_SET to report user features */
        dp_request.cmd = OVS_DP_CMD_SET;
    }
    dp_request.name = name;
    dp_request.user_features |= OVS_DP_F_UNALIGNED;
    dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
    error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
    if (error) {
        return error;
    }

    error = open_dpif(&dp, dpifp);
    ofpbuf_delete(buf);
    return error;
}
357
358 static int
359 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
360 {
361 struct dpif_netlink *dpif;
362
363 dpif = xzalloc(sizeof *dpif);
364 dpif->port_notifier = NULL;
365 fat_rwlock_init(&dpif->upcall_lock);
366
367 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
368 dp->dp_ifindex, dp->dp_ifindex);
369
370 dpif->dp_ifindex = dp->dp_ifindex;
371 *dpifp = &dpif->dpif;
372
373 return 0;
374 }
375
#ifdef _WIN32
/* Tears down 'handler''s socket pool: unsubscribes each socket from packet
 * reception, destroys it, and frees the pool array.  Safe to call when the
 * pool was never allocated or was only partially initialized. */
static void
vport_delete_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (handler->vport_sock_pool) {
        uint32_t i;
        struct dpif_windows_vport_sock *sock_pool =
            handler->vport_sock_pool;

        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
            if (sock_pool[i].nl_sock) {
                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
                nl_sock_destroy(sock_pool[i].nl_sock);
                sock_pool[i].nl_sock = NULL;
            }
        }

        free(handler->vport_sock_pool);
        handler->vport_sock_pool = NULL;
    }
}

/* Allocates and initializes 'handler''s pool of VPORT_SOCK_POOL_SIZE
 * Netlink sockets, subscribing each one to packet reception.  On any
 * failure the partially built pool is torn down again.  Returns 0 on
 * success, a positive errno value on failure. */
static int
vport_create_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_windows_vport_sock *sock_pool;
    size_t i;
    int error = 0;

    sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }

        /* Enable the netlink socket to receive packets.  This is equivalent
         * to calling nl_sock_join_mcgroup() to receive events. */
        error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
        if (error) {
            goto error;
        }
    }

    handler->vport_sock_pool = sock_pool;
    handler->last_used_pool_idx = 0;
    return 0;

error:
    /* NOTE(review): the pool is not yet assigned to 'handler' here, so
     * vport_delete_sock_pool() frees whatever 'handler->vport_sock_pool'
     * already held; assign 'sock_pool' first if it should free the new
     * allocation — confirm against the Windows build. */
    vport_delete_sock_pool(handler);
    return error;
}
#endif /* _WIN32 */
431
/* Given the port number 'port_idx', extracts the pid of netlink socket
 * associated to the port and assigns it to 'upcall_pid'.  Returns true on
 * success, false when the port has no channel (upcalls disabled). */
static bool
vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx,
              uint32_t *upcall_pid)
{
    /* Since the nl_sock can only be assigned in either all
     * or none "dpif" channels, the following check
     * would suffice. */
    if (!dpif->channels[port_idx].sock) {
        return false;
    }
    /* Windows builds use the shared socket pool on handler 0 only. */
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    *upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock);

    return true;
}
450
/* Registers 'socksp' as the upcall channel for port 'port_no' on 'dpif',
 * adding its fd to every handler's epoll set.  Grows the per-port arrays if
 * 'port_no' exceeds their current size.  Returns 0 on success (including
 * when upcalls are disabled, i.e. no handlers exist), EFBIG if the port
 * number is unreasonably large, or the errno from epoll_ctl() after rolling
 * back any registrations already made. */
static int
vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
                  struct nl_sock *socksp)
{
    struct epoll_event event;
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;
    int error;

    if (dpif->handlers == NULL) {
        return 0;
    }

    /* We assume that the datapath densely chooses port numbers, which can
     * therefore be used as an index into 'channels' and 'epoll_events' of
     * 'dpif'. */
    if (port_idx >= dpif->uc_array_size) {
        uint32_t new_size = port_idx + 1;

        if (new_size > MAX_PORTS) {
            VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
                         dpif_name(&dpif->dpif), port_no);
            return EFBIG;
        }

        dpif->channels = xrealloc(dpif->channels,
                                  new_size * sizeof *dpif->channels);

        /* Newly exposed slots start without a socket. */
        for (i = dpif->uc_array_size; i < new_size; i++) {
            dpif->channels[i].sock = NULL;
        }

        for (i = 0; i < dpif->n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->epoll_events = xrealloc(handler->epoll_events,
                new_size * sizeof *handler->epoll_events);

        }
        dpif->uc_array_size = new_size;
    }

    memset(&event, 0, sizeof event);
    /* EPOLLEXCLUSIVE: wake only one handler per event instead of all. */
    event.events = EPOLLIN | EPOLLEXCLUSIVE;
    event.data.u32 = port_idx;

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

#ifndef _WIN32
        if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp),
                      &event) < 0) {
            error = errno;
            goto error;
        }
#endif
    }
    dpif->channels[port_idx].sock = socksp;
    dpif->channels[port_idx].last_poll = LLONG_MIN;

    return 0;

error:
#ifndef _WIN32
    /* Unregister from the handlers that had already succeeded. */
    while (i--) {
        epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(socksp), NULL);
    }
#endif
    dpif->channels[port_idx].sock = NULL;

    return error;
}
524
/* Removes the upcall channel for port 'port_no' from 'dpif': deregisters
 * its socket from every handler's epoll set, discards any pending epoll
 * events, and destroys the socket.  A no-op if the port has no channel. */
static void
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
{
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;

    if (!dpif->handlers || port_idx >= dpif->uc_array_size
        || !dpif->channels[port_idx].sock) {
        return;
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
        epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(dpif->channels[port_idx].sock), NULL);
#endif
        /* Drop buffered events; they may reference the dying channel. */
        handler->event_offset = handler->n_events = 0;
    }
#ifndef _WIN32
    nl_sock_destroy(dpif->channels[port_idx].sock);
#endif
    dpif->channels[port_idx].sock = NULL;
}
549
/* Tears down every upcall channel and every handler of 'dpif': tells the
 * kernel to stop sending upcalls for each port (upcall PID 0), destroys the
 * channels, then frees the handler and channel arrays.  Requires the upcall
 * write-lock. */
static void
destroy_all_channels(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned int i;

    if (!dpif->handlers) {
        return;
    }

    for (i = 0; i < dpif->uc_array_size; i++ ) {
        struct dpif_netlink_vport vport_request;
        uint32_t upcall_pids = 0;

        if (!dpif->channels[i].sock) {
            continue;
        }

        /* Turn off upcalls. */
        dpif_netlink_vport_init(&vport_request);
        vport_request.cmd = OVS_VPORT_CMD_SET;
        vport_request.dp_ifindex = dpif->dp_ifindex;
        vport_request.port_no = u32_to_odp(i);
        vport_request.n_upcall_pids = 1;
        vport_request.upcall_pids = &upcall_pids;
        dpif_netlink_vport_transact(&vport_request, NULL, NULL);

        vport_del_channels(dpif, u32_to_odp(i));
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        dpif_netlink_handler_uninit(handler);
        free(handler->epoll_events);
    }
    free(dpif->channels);
    free(dpif->handlers);
    dpif->handlers = NULL;
    dpif->channels = NULL;
    dpif->n_handlers = 0;
    dpif->uc_array_size = 0;
}
593
594 static void
595 dpif_netlink_close(struct dpif *dpif_)
596 {
597 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
598
599 nl_sock_destroy(dpif->port_notifier);
600
601 fat_rwlock_wrlock(&dpif->upcall_lock);
602 destroy_all_channels(dpif);
603 fat_rwlock_unlock(&dpif->upcall_lock);
604
605 fat_rwlock_destroy(&dpif->upcall_lock);
606 free(dpif);
607 }
608
609 static int
610 dpif_netlink_destroy(struct dpif *dpif_)
611 {
612 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
613 struct dpif_netlink_dp dp;
614
615 dpif_netlink_dp_init(&dp);
616 dp.cmd = OVS_DP_CMD_DEL;
617 dp.dp_ifindex = dpif->dp_ifindex;
618 return dpif_netlink_dp_transact(&dp, NULL, NULL);
619 }
620
621 static bool
622 dpif_netlink_run(struct dpif *dpif_)
623 {
624 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
625
626 if (dpif->refresh_channels) {
627 dpif->refresh_channels = false;
628 fat_rwlock_wrlock(&dpif->upcall_lock);
629 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
630 fat_rwlock_unlock(&dpif->upcall_lock);
631 }
632 return false;
633 }
634
635 static int
636 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
637 {
638 struct dpif_netlink_dp dp;
639 struct ofpbuf *buf;
640 int error;
641
642 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
643 if (!error) {
644 memset(stats, 0, sizeof *stats);
645
646 if (dp.stats) {
647 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
648 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
649 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
650 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
651 }
652
653 if (dp.megaflow_stats) {
654 stats->n_masks = dp.megaflow_stats->n_masks;
655 stats->n_mask_hit = get_32aligned_u64(
656 &dp.megaflow_stats->n_mask_hit);
657 } else {
658 stats->n_masks = UINT32_MAX;
659 stats->n_mask_hit = UINT64_MAX;
660 }
661 ofpbuf_delete(buf);
662 }
663 return error;
664 }
665
/* Returns the dpif port-type string (e.g. "system", "internal", "vxlan")
 * for kernel vport 'vport', or "unknown" — with a rate-limited warning —
 * for vport types this build does not recognize. */
static const char *
get_vport_type(const struct dpif_netlink_vport *vport)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

    switch (vport->type) {
    case OVS_VPORT_TYPE_NETDEV: {
        /* A netdev vport's subtype depends on the backing device; default
         * to "system" when the device name is unknown to netdev. */
        const char *type = netdev_get_type_from_name(vport->name);

        return type ? type : "system";
    }

    case OVS_VPORT_TYPE_INTERNAL:
        return "internal";

    case OVS_VPORT_TYPE_GENEVE:
        return "geneve";

    case OVS_VPORT_TYPE_GRE:
        return "gre";

    case OVS_VPORT_TYPE_VXLAN:
        return "vxlan";

    case OVS_VPORT_TYPE_LISP:
        return "lisp";

    case OVS_VPORT_TYPE_STT:
        return "stt";

    case OVS_VPORT_TYPE_ERSPAN:
        return "erspan";

    case OVS_VPORT_TYPE_IP6ERSPAN:
        return "ip6erspan";

    case OVS_VPORT_TYPE_IP6GRE:
        return "ip6gre";

    case OVS_VPORT_TYPE_UNSPEC:
    case __OVS_VPORT_TYPE_MAX:
        break;
    }

    VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
                 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
    return "unknown";
}
714
715 enum ovs_vport_type
716 netdev_to_ovs_vport_type(const char *type)
717 {
718 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
719 return OVS_VPORT_TYPE_NETDEV;
720 } else if (!strcmp(type, "internal")) {
721 return OVS_VPORT_TYPE_INTERNAL;
722 } else if (strstr(type, "stt")) {
723 return OVS_VPORT_TYPE_STT;
724 } else if (!strcmp(type, "geneve")) {
725 return OVS_VPORT_TYPE_GENEVE;
726 } else if (!strcmp(type, "vxlan")) {
727 return OVS_VPORT_TYPE_VXLAN;
728 } else if (!strcmp(type, "lisp")) {
729 return OVS_VPORT_TYPE_LISP;
730 } else if (!strcmp(type, "erspan")) {
731 return OVS_VPORT_TYPE_ERSPAN;
732 } else if (!strcmp(type, "ip6erspan")) {
733 return OVS_VPORT_TYPE_IP6ERSPAN;
734 } else if (!strcmp(type, "ip6gre")) {
735 return OVS_VPORT_TYPE_IP6GRE;
736 } else if (!strcmp(type, "gre")) {
737 return OVS_VPORT_TYPE_GRE;
738 } else {
739 return OVS_VPORT_TYPE_UNSPEC;
740 }
741 }
742
/* Creates a kernel vport named 'name' of the given 'type' (with optional
 * type-specific Netlink 'options') in 'dpif', and registers an upcall
 * channel for it.  On entry '*port_nop' is the requested port number or
 * ODPP_NONE to let the kernel choose; on success it is set to the assigned
 * port number.  If the channel cannot be registered, the just-created
 * vport is deleted again so the datapath is left unchanged.  Returns 0 on
 * success, a positive errno value on failure. */
static int
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                        enum ovs_vport_type type,
                        struct ofpbuf *options,
                        odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport request, reply;
    struct ofpbuf *buf;
    struct nl_sock *socksp = NULL;
    uint32_t upcall_pids = 0;
    int error = 0;

    /* Only allocate an upcall socket when handlers exist; otherwise the
     * vport is created with upcall PID 0. */
    if (dpif->handlers) {
        error = create_nl_sock(dpif, &socksp);
        if (error) {
            return error;
        }
    }

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_NEW;
    request.dp_ifindex = dpif->dp_ifindex;
    request.type = type;
    request.name = name;

    request.port_no = *port_nop;
    if (socksp) {
        upcall_pids = nl_sock_pid(socksp);
    }
    request.n_upcall_pids = 1;
    request.upcall_pids = &upcall_pids;

    if (options) {
        request.options = options->data;
        request.options_len = options->size;
    }

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        *port_nop = reply.port_no;
    } else {
        if (error == EBUSY && *port_nop != ODPP_NONE) {
            VLOG_INFO("%s: requested port %"PRIu32" is in use",
                      dpif_name(&dpif->dpif), *port_nop);
        }

        close_nl_sock(socksp);
        goto exit;
    }

    error = vport_add_channel(dpif, *port_nop, socksp);
    if (error) {
        VLOG_INFO("%s: could not add channel for port %s",
                  dpif_name(&dpif->dpif), name);

        /* Delete the port. */
        dpif_netlink_vport_init(&request);
        request.cmd = OVS_VPORT_CMD_DEL;
        request.dp_ifindex = dpif->dp_ifindex;
        request.port_no = *port_nop;
        dpif_netlink_vport_transact(&request, NULL, NULL);
        close_nl_sock(socksp);
        goto exit;
    }

exit:
    ofpbuf_delete(buf);

    return error;
}
814
/* Adds 'netdev' to 'dpif' through the OVS compat (genetlink) interface:
 * maps the netdev type to a vport type, disables LRO on plain system
 * devices, and for tunnel devices encodes the destination port and
 * extension flags as OVS_TUNNEL_ATTR_* options.  Returns 0 on success, a
 * positive errno value on failure. */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX: Map appropriate Windows handle. */
#else
        /* LRO interferes with forwarding, so turn it off. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        /* Encode tunnel configuration as vport options. */
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            /* Each set bit in 'exts' becomes a flag attribute inside a
             * nested OVS_TUNNEL_ATTR_EXTENSION. */
            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (1 << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
881
882 static int
883 dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
884 struct netdev *netdev,
885 odp_port_t *port_nop)
886 OVS_REQ_WRLOCK(dpif->upcall_lock)
887 {
888 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
889 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
890 const char *name;
891 int error;
892
893 error = dpif_netlink_rtnl_port_create(netdev);
894 if (error) {
895 if (error != EOPNOTSUPP) {
896 VLOG_WARN_RL(&rl, "Failed to create %s with rtnetlink: %s",
897 netdev_get_name(netdev), ovs_strerror(error));
898 }
899 return error;
900 }
901
902 name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
903 error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
904 port_nop);
905 if (error) {
906 dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
907 }
908 return error;
909 }
910
911 static int
912 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
913 odp_port_t *port_nop)
914 {
915 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
916 int error = EOPNOTSUPP;
917
918 fat_rwlock_wrlock(&dpif->upcall_lock);
919 if (!ovs_tunnels_out_of_tree) {
920 error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop);
921 }
922 if (error) {
923 error = dpif_netlink_port_add_compat(dpif, netdev, port_nop);
924 }
925 fat_rwlock_unlock(&dpif->upcall_lock);
926
927 return error;
928 }
929
/* Removes port 'port_no' from 'dpif': deletes the kernel vport, tears down
 * its upcall channel, and — when tunnels are managed via rtnetlink — also
 * destroys the backing tunnel device.  Returns 0 on success, a positive
 * errno value on failure. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    /* Look the port up first: its name and type are needed below for the
     * Windows WMI cleanup and the rtnetlink device destruction. */
    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* Not an rtnetlink-managed device; nothing further to destroy. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
970
971 static int
972 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
973 {
974 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
975 int error;
976
977 fat_rwlock_wrlock(&dpif->upcall_lock);
978 error = dpif_netlink_port_del__(dpif, port_no);
979 fat_rwlock_unlock(&dpif->upcall_lock);
980
981 return error;
982 }
983
/* Queries 'dpif' for the vport numbered 'port_no', or named 'port_name' if
 * that is nonnull.  On success, when 'dpif_port' is nonnull, fills it in
 * with malloc'ed strings that the caller must release with
 * dpif_port_destroy().  Returns 0 on success, ENODEV if a lookup by name
 * found the port in a different datapath, or another positive errno value
 * on failure. */
static int
dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
                          const char *port_name, struct dpif_port *dpif_port)
{
    struct dpif_netlink_vport request;
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;
    request.port_no = port_no;
    request.name = port_name;

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        if (reply.dp_ifindex != request.dp_ifindex) {
            /* A query by name reported that 'port_name' is in some datapath
             * other than 'dpif', but the caller wants to know about 'dpif'. */
            error = ENODEV;
        } else if (dpif_port) {
            dpif_port->name = xstrdup(reply.name);
            dpif_port->type = xstrdup(get_vport_type(&reply));
            dpif_port->port_no = reply.port_no;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
1014
1015 static int
1016 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
1017 struct dpif_port *dpif_port)
1018 {
1019 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1020
1021 return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
1022 }
1023
/* dpif 'port_query_by_name' entry point: looks up 'devname' in 'dpif_'. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    return dpif_netlink_port_query__(dpif_netlink_cast(dpif_), 0, devname,
                                     dpif_port);
}
1032
/* Returns the Netlink socket PID to which upcalls for 'port_no' should be
 * directed, or 0 when 'dpif' has no upcall channels or the port's socket
 * has gone away.  Caller holds at least a read-lock on 'upcall_lock'. */
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
                            odp_port_t port_no)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    uint32_t port_idx = odp_to_u32(port_no);
    uint32_t pid = 0;

    if (dpif->handlers && dpif->uc_array_size > 0) {
        /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
         * channel, since it is not heavily loaded. */
        uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;

        /* Needs to check in case the socket pointer is changed in between
         * the holding of upcall_lock. A known case happens when the main
         * thread deletes the vport while the handler thread is handling
         * the upcall from that port. */
        if (dpif->channels[idx].sock) {
            pid = nl_sock_pid(dpif->channels[idx].sock);
        }
    }

    return pid;
}
1057
1058 static uint32_t
1059 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
1060 {
1061 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1062 uint32_t ret;
1063
1064 fat_rwlock_rdlock(&dpif->upcall_lock);
1065 ret = dpif_netlink_port_get_pid__(dpif, port_no);
1066 fat_rwlock_unlock(&dpif->upcall_lock);
1067
1068 return ret;
1069 }
1070
1071 static int
1072 dpif_netlink_flow_flush(struct dpif *dpif_)
1073 {
1074 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1075 struct dpif_netlink_flow flow;
1076
1077 dpif_netlink_flow_init(&flow);
1078 flow.cmd = OVS_FLOW_CMD_DEL;
1079 flow.dp_ifindex = dpif->dp_ifindex;
1080
1081 if (netdev_is_flow_api_enabled()) {
1082 netdev_ports_flow_flush(dpif_->dpif_class);
1083 }
1084
1085 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1086 }
1087
/* State carried between dpif_netlink_port_dump_start() and subsequent
 * dump_next()/dump_done() calls. */
struct dpif_netlink_port_state {
    struct nl_dump dump;    /* In-progress OVS_VPORT_CMD_GET dump. */
    struct ofpbuf buf;      /* Reusable buffer for dump replies. */
};
1092
1093 static void
1094 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1095 struct nl_dump *dump)
1096 {
1097 struct dpif_netlink_vport request;
1098 struct ofpbuf *buf;
1099
1100 dpif_netlink_vport_init(&request);
1101 request.cmd = OVS_VPORT_CMD_GET;
1102 request.dp_ifindex = dpif->dp_ifindex;
1103
1104 buf = ofpbuf_new(1024);
1105 dpif_netlink_vport_to_ofpbuf(&request, buf);
1106 nl_dump_start(dump, NETLINK_GENERIC, buf);
1107 ofpbuf_delete(buf);
1108 }
1109
1110 static int
1111 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1112 {
1113 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1114 struct dpif_netlink_port_state *state;
1115
1116 *statep = state = xmalloc(sizeof *state);
1117 dpif_netlink_port_dump_start__(dpif, &state->dump);
1118
1119 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
1120 return 0;
1121 }
1122
1123 static int
1124 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1125 struct nl_dump *dump,
1126 struct dpif_netlink_vport *vport,
1127 struct ofpbuf *buffer)
1128 {
1129 struct ofpbuf buf;
1130 int error;
1131
1132 if (!nl_dump_next(dump, &buf, buffer)) {
1133 return EOF;
1134 }
1135
1136 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1137 if (error) {
1138 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1139 dpif_name(&dpif->dpif), ovs_strerror(error));
1140 }
1141 return error;
1142 }
1143
1144 static int
1145 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1146 struct dpif_port *dpif_port)
1147 {
1148 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1149 struct dpif_netlink_port_state *state = state_;
1150 struct dpif_netlink_vport vport;
1151 int error;
1152
1153 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1154 &state->buf);
1155 if (error) {
1156 return error;
1157 }
1158 dpif_port->name = CONST_CAST(char *, vport.name);
1159 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1160 dpif_port->port_no = vport.port_no;
1161 return 0;
1162 }
1163
1164 static int
1165 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1166 {
1167 struct dpif_netlink_port_state *state = state_;
1168 int error = nl_dump_done(&state->dump);
1169
1170 ofpbuf_uninit(&state->buf);
1171 free(state);
1172 return error;
1173 }
1174
/* Polls for a port-change notification on 'dpif_'.  On success stores the
 * name of the changed vport (caller frees) in '*devnamep' and returns 0.
 * Returns ENOBUFS when notifications may have been lost (caller should
 * re-examine everything), EAGAIN when nothing is pending, or another
 * positive errno value on error. */
static int
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    /* Read notifications until we find one for this datapath, run dry
     * (EAGAIN), or hit an error. */
    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, NULL, false);
        if (!error) {
            struct dpif_netlink_vport vport;

            error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                /* Only NEW/DEL/SET events on our own datapath are
                 * interesting; everything else is silently skipped. */
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    /* A deleted vport invalidates its upcall channel, so
                     * ask the handler threads to rebuild them. */
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            /* Receive error (e.g. ENOBUFS overflow): drain the socket and
             * tell the caller that anything may have changed. */
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
1242
1243 static void
1244 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1245 {
1246 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1247
1248 if (dpif->port_notifier) {
1249 nl_sock_wait(dpif->port_notifier, POLLIN);
1250 } else {
1251 poll_immediate_wake();
1252 }
1253 }
1254
1255 static void
1256 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1257 const ovs_u128 *ufid, bool terse)
1258 {
1259 if (ufid) {
1260 request->ufid = *ufid;
1261 request->ufid_present = true;
1262 } else {
1263 request->ufid_present = false;
1264 }
1265 request->ufid_terse = terse;
1266 }
1267
/* Initializes 'request' as an OVS_FLOW_CMD_GET for the flow identified by
 * 'key'/'key_len' and/or 'ufid' in 'dpif''s datapath.  'terse' requests a
 * reply without actions. */
static void
dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
                             const struct nlattr *key, size_t key_len,
                             const ovs_u128 *ufid, bool terse,
                             struct dpif_netlink_flow *request)
{
    dpif_netlink_flow_init(request);
    request->cmd = OVS_FLOW_CMD_GET;
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = key;
    request->key_len = key_len;
    dpif_netlink_flow_init_ufid(request, ufid, terse);
}
1281
/* Initializes 'request' as a full (non-terse) flow-get for the flow that
 * 'get' describes. */
static void
dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
                           const struct dpif_flow_get *get,
                           struct dpif_netlink_flow *request)
{
    dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
                                 false, request);
}
1290
/* Fetches from the kernel the flow identified by 'key'/'ufid'.  On success
 * fills '*reply' and stores the backing netlink message in '*bufp', which
 * the caller must eventually free.  Returns 0 or a positive errno value. */
static int
dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
                        const struct nlattr *key, size_t key_len,
                        const ovs_u128 *ufid, bool terse,
                        struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
{
    struct dpif_netlink_flow request;

    dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
    return dpif_netlink_flow_transact(&request, reply, bufp);
}
1302
/* Re-fetches 'flow' from the kernel (by UFID when present, otherwise by
 * key), e.g. to obtain actions omitted from a dump.  'reply' may alias
 * 'flow'; '*bufp' receives the backing message for the caller to free. */
static int
dpif_netlink_flow_get(const struct dpif_netlink *dpif,
                      const struct dpif_netlink_flow *flow,
                      struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
{
    return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
                                   flow->ufid_present ? &flow->ufid : NULL,
                                   false, reply, bufp);
}
1312
1313 static void
1314 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1315 const struct dpif_flow_put *put,
1316 struct dpif_netlink_flow *request)
1317 {
1318 static const struct nlattr dummy_action;
1319
1320 dpif_netlink_flow_init(request);
1321 request->cmd = (put->flags & DPIF_FP_CREATE
1322 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1323 request->dp_ifindex = dpif->dp_ifindex;
1324 request->key = put->key;
1325 request->key_len = put->key_len;
1326 request->mask = put->mask;
1327 request->mask_len = put->mask_len;
1328 dpif_netlink_flow_init_ufid(request, put->ufid, false);
1329
1330 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1331 request->actions = (put->actions
1332 ? put->actions
1333 : CONST_CAST(struct nlattr *, &dummy_action));
1334 request->actions_len = put->actions_len;
1335 if (put->flags & DPIF_FP_ZERO_STATS) {
1336 request->clear = true;
1337 }
1338 if (put->flags & DPIF_FP_PROBE) {
1339 request->probe = true;
1340 }
1341 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
1342 }
1343
/* Initializes 'request' as an OVS_FLOW_CMD_DEL for the flow identified by
 * 'key'/'key_len' and/or 'ufid'.  'terse' suppresses actions in the reply. */
static void
dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
                             const struct nlattr *key, size_t key_len,
                             const ovs_u128 *ufid, bool terse,
                             struct dpif_netlink_flow *request)
{
    dpif_netlink_flow_init(request);
    request->cmd = OVS_FLOW_CMD_DEL;
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = key;
    request->key_len = key_len;
    dpif_netlink_flow_init_ufid(request, ufid, terse);
}
1357
/* Initializes 'request' as a flow-delete for the flow that 'del'
 * describes. */
static void
dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
                           const struct dpif_flow_del *del,
                           struct dpif_netlink_flow *request)
{
    dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
                                 del->ufid, del->terse, request);
}
1366
/* State shared by all threads participating in one flow dump. */
struct dpif_netlink_flow_dump {
    struct dpif_flow_dump up;            /* Base class. */
    struct nl_dump nl_dump;              /* Kernel-datapath flow dump. */
    atomic_int status;                   /* First error encountered, or 0. */
    struct netdev_flow_dump **netdev_dumps;  /* Per-netdev offload dumps. */
    int netdev_dumps_num;                    /* Number of netdev_flow_dumps */
    struct ovs_mutex netdev_lock;            /* Guards the following. */
    int netdev_current_dump OVS_GUARDED;     /* Shared current dump */
    struct dpif_flow_dump_types types;       /* Type of dump */
};
1377
/* Downcasts generic 'dump' to its dpif-netlink implementation struct. */
static struct dpif_netlink_flow_dump *
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
}
1383
1384 static void
1385 start_netdev_dump(const struct dpif *dpif_,
1386 struct dpif_netlink_flow_dump *dump)
1387 {
1388 ovs_mutex_init(&dump->netdev_lock);
1389
1390 if (!(dump->types.netdev_flows)) {
1391 dump->netdev_dumps_num = 0;
1392 dump->netdev_dumps = NULL;
1393 return;
1394 }
1395
1396 ovs_mutex_lock(&dump->netdev_lock);
1397 dump->netdev_current_dump = 0;
1398 dump->netdev_dumps
1399 = netdev_ports_flow_dump_create(dpif_->dpif_class,
1400 &dump->netdev_dumps_num);
1401 ovs_mutex_unlock(&dump->netdev_lock);
1402 }
1403
1404 static void
1405 dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump,
1406 struct dpif_flow_dump_types *types)
1407 {
1408 if (!types) {
1409 dump->types.ovs_flows = true;
1410 dump->types.netdev_flows = true;
1411 } else {
1412 memcpy(&dump->types, types, sizeof *types);
1413 }
1414 }
1415
1416 static struct dpif_flow_dump *
1417 dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
1418 struct dpif_flow_dump_types *types)
1419 {
1420 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1421 struct dpif_netlink_flow_dump *dump;
1422 struct dpif_netlink_flow request;
1423 struct ofpbuf *buf;
1424
1425 dump = xmalloc(sizeof *dump);
1426 dpif_flow_dump_init(&dump->up, dpif_);
1427
1428 dpif_netlink_populate_flow_dump_types(dump, types);
1429
1430 if (dump->types.ovs_flows) {
1431 dpif_netlink_flow_init(&request);
1432 request.cmd = OVS_FLOW_CMD_GET;
1433 request.dp_ifindex = dpif->dp_ifindex;
1434 request.ufid_present = false;
1435 request.ufid_terse = terse;
1436
1437 buf = ofpbuf_new(1024);
1438 dpif_netlink_flow_to_ofpbuf(&request, buf);
1439 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
1440 ofpbuf_delete(buf);
1441 }
1442 atomic_init(&dump->status, 0);
1443 dump->up.terse = terse;
1444
1445 start_netdev_dump(dpif_, dump);
1446
1447 return &dump->up;
1448 }
1449
1450 static int
1451 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1452 {
1453 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1454 unsigned int nl_status = 0;
1455 int dump_status;
1456
1457 if (dump->types.ovs_flows) {
1458 nl_status = nl_dump_done(&dump->nl_dump);
1459 }
1460
1461 for (int i = 0; i < dump->netdev_dumps_num; i++) {
1462 int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);
1463
1464 if (err != 0 && err != EOPNOTSUPP) {
1465 VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
1466 }
1467 }
1468
1469 free(dump->netdev_dumps);
1470 ovs_mutex_destroy(&dump->netdev_lock);
1471
1472 /* No other thread has access to 'dump' at this point. */
1473 atomic_read_relaxed(&dump->status, &dump_status);
1474 free(dump);
1475 return dump_status ? dump_status : nl_status;
1476 }
1477
/* Per-thread state for iterating over a shared flow dump. */
struct dpif_netlink_flow_dump_thread {
    struct dpif_flow_dump_thread up;     /* Base class. */
    struct dpif_netlink_flow_dump *dump; /* Shared dump state. */
    struct dpif_netlink_flow flow;       /* Scratch parsed flow. */
    struct dpif_flow_stats stats;        /* Scratch flow statistics. */
    struct ofpbuf nl_flows;     /* Always used to store flows. */
    struct ofpbuf *nl_actions;  /* Used if kernel does not supply actions. */
    int netdev_dump_idx;        /* This thread current netdev dump index */
    bool netdev_done;           /* If we are finished dumping netdevs */

    /* (Key/Mask/Actions) Buffers for netdev dumping */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
};
1493
/* Downcasts generic 'thread' to its dpif-netlink implementation struct. */
static struct dpif_netlink_flow_dump_thread *
dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
}
1499
1500 static struct dpif_flow_dump_thread *
1501 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1502 {
1503 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1504 struct dpif_netlink_flow_dump_thread *thread;
1505
1506 thread = xmalloc(sizeof *thread);
1507 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1508 thread->dump = dump;
1509 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1510 thread->nl_actions = NULL;
1511 thread->netdev_dump_idx = 0;
1512 thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num);
1513
1514 return &thread->up;
1515 }
1516
1517 static void
1518 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1519 {
1520 struct dpif_netlink_flow_dump_thread *thread
1521 = dpif_netlink_flow_dump_thread_cast(thread_);
1522
1523 ofpbuf_uninit(&thread->nl_flows);
1524 ofpbuf_delete(thread->nl_actions);
1525 free(thread);
1526 }
1527
1528 static void
1529 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1530 const struct dpif_netlink_flow *datapath_flow)
1531 {
1532 dpif_flow->key = datapath_flow->key;
1533 dpif_flow->key_len = datapath_flow->key_len;
1534 dpif_flow->mask = datapath_flow->mask;
1535 dpif_flow->mask_len = datapath_flow->mask_len;
1536 dpif_flow->actions = datapath_flow->actions;
1537 dpif_flow->actions_len = datapath_flow->actions_len;
1538 dpif_flow->ufid_present = datapath_flow->ufid_present;
1539 dpif_flow->pmd_id = PMD_ID_NULL;
1540 if (datapath_flow->ufid_present) {
1541 dpif_flow->ufid = datapath_flow->ufid;
1542 } else {
1543 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1544 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1545 &dpif_flow->ufid);
1546 }
1547 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
1548 dpif_flow->attrs.offloaded = false;
1549 dpif_flow->attrs.dp_layer = "ovs";
1550 }
1551
/* The design is such that all threads are working together on the first dump
 * to the last, in order (at first they all on dump 0).
 * When the first thread finds that the given dump is finished,
 * they all move to the next. If two or more threads find the same dump
 * is finished at the same time, the first one will advance the shared
 * netdev_current_dump and the others will catch up.
 *
 * Called when netdev_flow_dump_next() reports no more flows in this
 * thread's current dump; may set thread->netdev_done. */
static void
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
{
    struct dpif_netlink_flow_dump *dump = thread->dump;

    ovs_mutex_lock(&dump->netdev_lock);
    /* if we haven't finished (dumped everything) */
    if (dump->netdev_current_dump < dump->netdev_dumps_num) {
        /* if we are the first to find that current dump is finished
         * advance it. */
        if (thread->netdev_dump_idx == dump->netdev_current_dump) {
            thread->netdev_dump_idx = ++dump->netdev_current_dump;
            /* did we just finish the last dump? done. */
            if (dump->netdev_current_dump == dump->netdev_dumps_num) {
                thread->netdev_done = true;
            }
        } else {
            /* otherwise, we are behind, catch up */
            thread->netdev_dump_idx = dump->netdev_current_dump;
        }
    } else {
        /* some other thread finished */
        thread->netdev_done = true;
    }
    ovs_mutex_unlock(&dump->netdev_lock);
}
1584
1585 static int
1586 dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
1587 struct ofpbuf *key_buf,
1588 struct ofpbuf *mask_buf,
1589 struct nlattr *actions,
1590 struct dpif_flow_stats *stats,
1591 struct dpif_flow_attrs *attrs,
1592 ovs_u128 *ufid,
1593 struct dpif_flow *flow,
1594 bool terse OVS_UNUSED)
1595 {
1596
1597 struct odp_flow_key_parms odp_parms = {
1598 .flow = &match->flow,
1599 .mask = &match->wc.masks,
1600 .support = {
1601 .max_vlan_headers = 2,
1602 },
1603 };
1604 size_t offset;
1605
1606 memset(flow, 0, sizeof *flow);
1607
1608 /* Key */
1609 offset = key_buf->size;
1610 flow->key = ofpbuf_tail(key_buf);
1611 odp_flow_key_from_flow(&odp_parms, key_buf);
1612 flow->key_len = key_buf->size - offset;
1613
1614 /* Mask */
1615 offset = mask_buf->size;
1616 flow->mask = ofpbuf_tail(mask_buf);
1617 odp_parms.key_buf = key_buf;
1618 odp_flow_key_from_mask(&odp_parms, mask_buf);
1619 flow->mask_len = mask_buf->size - offset;
1620
1621 /* Actions */
1622 flow->actions = nl_attr_get(actions);
1623 flow->actions_len = nl_attr_get_size(actions);
1624
1625 /* Stats */
1626 memcpy(&flow->stats, stats, sizeof *stats);
1627
1628 /* UFID */
1629 flow->ufid_present = true;
1630 flow->ufid = *ufid;
1631
1632 flow->pmd_id = PMD_ID_NULL;
1633
1634 memcpy(&flow->attrs, attrs, sizeof *attrs);
1635
1636 return 0;
1637 }
1638
/* Fills 'flows' with up to 'max_flows' flows from the dump: first any
 * netdev (offloaded) flows, then kernel-datapath flows.  Returns the number
 * of flows stored; the pointers inside each dpif_flow remain valid only
 * until the next call on this thread. */
static int
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);
    struct dpif_netlink_flow_dump *dump = thread->dump;
    struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
    int n_flows;

    /* Release the single-flow actions buffer left over from a previous
     * batch's "rare case" below. */
    ofpbuf_delete(thread->nl_actions);
    thread->nl_actions = NULL;

    n_flows = 0;
    max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

    /* Phase 1: pull flows from the netdev (hardware offload) dumps. */
    while (!thread->netdev_done && n_flows < max_flows) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
        struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
        struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
        struct ofpbuf key, mask, act;
        struct dpif_flow *f = &flows[n_flows];
        int cur = thread->netdev_dump_idx;
        struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
        struct match match;
        struct nlattr *actions;
        struct dpif_flow_stats stats;
        struct dpif_flow_attrs attrs;
        ovs_u128 ufid;
        bool has_next;

        /* Per-slot stack buffers keep each flow's data alive for the whole
         * batch. */
        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        has_next = netdev_flow_dump_next(netdev_dump, &match,
                                         &actions, &stats, &attrs,
                                         &ufid,
                                         &thread->nl_flows,
                                         &act);
        if (has_next) {
            dpif_netlink_netdev_match_to_dpif_flow(&match,
                                                   &key, &mask,
                                                   actions,
                                                   &stats,
                                                   &attrs,
                                                   &ufid,
                                                   f,
                                                   dump->up.terse);
            n_flows++;
        } else {
            /* Current netdev dump exhausted; move to the next one. */
            dpif_netlink_advance_netdev_dump(thread);
        }
    }

    if (!(dump->types.ovs_flows)) {
        return n_flows;
    }

    /* Phase 2: pull flows from the kernel datapath netlink dump. */
    while (!n_flows
           || (n_flows < max_flows && thread->nl_flows.size)) {
        struct dpif_netlink_flow datapath_flow;
        struct ofpbuf nl_flow;
        int error;

        /* Try to grab another flow. */
        if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
            break;
        }

        /* Convert the flow to our output format. */
        error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
        if (error) {
            atomic_store_relaxed(&dump->status, error);
            break;
        }

        if (dump->up.terse || datapath_flow.actions) {
            /* Common case: we don't want actions, or the flow includes
             * actions. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
        } else {
            /* Rare case: the flow does not include actions.  Retrieve this
             * individual flow again to get the actions. */
            error = dpif_netlink_flow_get(dpif, &datapath_flow,
                                          &datapath_flow, &thread->nl_actions);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
                continue;
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
                atomic_store_relaxed(&dump->status, error);
                break;
            }

            /* Save this flow.  Then exit, because we only have one buffer to
             * handle this case. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
            break;
        }
    }
    return n_flows;
}
1744
/* Appends to 'buf' an OVS_PACKET_CMD_EXECUTE request that injects
 * 'd_exec->packet' into datapath 'dp_ifindex' and runs 'd_exec->actions'
 * on it. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve enough room up front: headers (~64 bytes), packet data,
     * metadata key, and actions. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    /* The raw packet bytes. */
    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Packet metadata (in_port, tunnel info, ...) as a nested key. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }
}
1780
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive).
 *
 * Encodes each operation into a netlink request, sends the whole batch via
 * nl_transact_multiple(), then parses each reply into the corresponding
 * op's result fields and 'op->error'. */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    /* Per-op request/reply buffers plus the transaction record that ties
     * them together. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Pass 1: encode each operation into its request buffer. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO makes the kernel echo the flow (with stats)
                 * back in the reply. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->execute,
                                            &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Send all requests as one batch. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: decode each reply into the corresponding op. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    /* get->flow points into get->buffer, which the caller
                     * owns, so it survives this function. */
                    dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
                                                   &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
1943
/* Looks up, via the netdev flow API (hardware offload), the flow identified
 * by 'get->ufid' and translates it into 'get->flow'.  Returns 0 on success
 * or a positive errno value (e.g. if no offloaded flow has that UFID). */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct dpif_flow_attrs attrs;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    /* 'buf' receives the flow's actions from the netdev layer. */
    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif->dpif.dpif_class, &match,
                                &actions, get->ufid, &stats, &attrs, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    /* Re-encode key/mask/actions into odp format on the stack. */
    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats, &attrs,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* Copy the actions into the caller-owned buffer, which outlives the
     * stack buffers above, and point the result at that copy. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
1983
/* Attempts to install the flow described by 'put' into hardware via the
 * netdev flow API.  Returns 0 on success or a positive errno value; callers
 * fall back to the kernel datapath on most errors.  May clear
 * DPIF_FP_MODIFY in 'put->flags' when a previously offloaded flow had to be
 * removed from hardware. */
static int
parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct dpif_class *dpif_class = dpif->dpif.dpif_class;
    struct match match;
    odp_port_t in_port;
    const struct nlattr *nla;
    size_t left;
    struct netdev *dev;
    struct offload_info info;
    ovs_be16 dst_port = 0;
    uint8_t csum_on = false;
    int err;

    /* Probe flows are internal feature checks, not real traffic; never
     * offload them. */
    if (put->flags & DPIF_FP_PROBE) {
        return EOPNOTSUPP;
    }

    err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
                                      put->mask_len, &match);
    if (err) {
        return err;
    }

    /* Offload is programmed on the flow's input port netdev. */
    in_port = match.flow.in_port.odp_port;
    dev = netdev_ports_get(in_port, dpif_class);
    if (!dev) {
        return EOPNOTSUPP;
    }

    /* Get tunnel dst port */
    NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            const struct netdev_tunnel_config *tnl_cfg;
            struct netdev *outdev;
            odp_port_t out_port;

            out_port = nl_attr_get_odp_port(nla);
            outdev = netdev_ports_get(out_port, dpif_class);
            if (!outdev) {
                err = EOPNOTSUPP;
                goto out;
            }
            tnl_cfg = netdev_get_tunnel_config(outdev);
            if (tnl_cfg && tnl_cfg->dst_port != 0) {
                dst_port = tnl_cfg->dst_port;
            }
            if (tnl_cfg) {
                csum_on = tnl_cfg->csum;
            }
            netdev_close(outdev);
        }
    }

    info.dpif_class = dpif_class;
    info.tp_dst_port = dst_port;
    info.tunnel_csum_on = csum_on;
    err = netdev_flow_put(dev, &match,
                          CONST_CAST(struct nlattr *, put->actions),
                          put->actions_len,
                          CONST_CAST(ovs_u128 *, put->ufid),
                          &info, put->stats);

    if (!err) {
        if (put->flags & DPIF_FP_MODIFY) {
            /* The modified flow is now in hardware; remove the stale copy
             * from the kernel datapath. */
            struct dpif_op *opp;
            struct dpif_op op;

            op.type = DPIF_OP_FLOW_DEL;
            op.flow_del.key = put->key;
            op.flow_del.key_len = put->key_len;
            op.flow_del.ufid = put->ufid;
            op.flow_del.pmd_id = put->pmd_id;
            op.flow_del.stats = NULL;
            op.flow_del.terse = false;

            opp = &op;
            dpif_netlink_operate__(dpif, &opp, 1);
        }

        VLOG_DBG("added flow");
    } else if (err != EEXIST) {
        struct netdev *oor_netdev = NULL;
        enum vlog_level level;
        if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
            /*
             * We need to set OOR on the input netdev (i.e, 'dev') for the
             * flow. But if the flow has a tunnel attribute (i.e, decap action,
             * with a virtual device like a VxLAN interface as its in-port),
             * then lookup and set OOR on the underlying tunnel (real) netdev.
             */
            oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
            if (!oor_netdev) {
                /* Not a 'tunnel' flow */
                oor_netdev = dev;
            }
            netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
        }
        /* Expected failures (out of space, unsupported match/action) are
         * only debug-level noise; anything else is a real error. */
        level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
        VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
                ovs_strerror(err),
                (oor_netdev ? oor_netdev->name : dev->name));
    }

out:
    if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
        /* Modified rule can't be offloaded, try and delete from HW */
        int del_err = netdev_flow_del(dev, put->ufid, put->stats);

        if (!del_err) {
            /* Delete from hw success, so old flow was offloaded.
             * Change flags to create the flow in kernel */
            put->flags &= ~DPIF_FP_MODIFY;
            put->flags |= DPIF_FP_CREATE;
        } else if (del_err != ENOENT) {
            VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
                        ovs_strerror(del_err));
            /* stop proccesing the flow in kernel */
            err = 0;
        }
    }

    netdev_close(dev);

    return err;
}
2111
2112 static int
2113 try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op)
2114 {
2115 int err = EOPNOTSUPP;
2116
2117 switch (op->type) {
2118 case DPIF_OP_FLOW_PUT: {
2119 struct dpif_flow_put *put = &op->flow_put;
2120
2121 if (!put->ufid) {
2122 break;
2123 }
2124
2125 log_flow_put_message(&dpif->dpif, &this_module, put, 0);
2126 err = parse_flow_put(dpif, put);
2127 break;
2128 }
2129 case DPIF_OP_FLOW_DEL: {
2130 struct dpif_flow_del *del = &op->flow_del;
2131
2132 if (!del->ufid) {
2133 break;
2134 }
2135
2136 log_flow_del_message(&dpif->dpif, &this_module, del, 0);
2137 err = netdev_ports_flow_del(dpif->dpif.dpif_class, del->ufid,
2138 del->stats);
2139 break;
2140 }
2141 case DPIF_OP_FLOW_GET: {
2142 struct dpif_flow_get *get = &op->flow_get;
2143
2144 if (!op->flow_get.ufid) {
2145 break;
2146 }
2147
2148 log_flow_get_message(&dpif->dpif, &this_module, get, 0);
2149 err = parse_flow_get(dpif, get);
2150 break;
2151 }
2152 case DPIF_OP_EXECUTE:
2153 default:
2154 break;
2155 }
2156
2157 return err;
2158 }
2159
static void
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
                            size_t n_ops)
{
    size_t done;

    /* dpif_netlink_operate__() executes at most OPERATE_MAX_OPS operations
     * per call, so keep invoking it until the whole batch is consumed. */
    for (; n_ops > 0; ops += done, n_ops -= done) {
        done = dpif_netlink_operate__(dpif, ops, n_ops);
    }
}
2171
/* Executes, or attempts to execute, each of the 'n_ops' operations in 'ops'.
 *
 * When hardware offload is enabled and 'offload_type' allows it, each op is
 * first offered to the netdev flow API via try_send_to_netdev().  Ops that
 * hardware rejects are batched into 'new_ops' and sent to the kernel
 * datapath instead -- except under DPIF_OFFLOAD_ALWAYS, where the first
 * offload failure aborts processing and stamps the same error on all
 * remaining ops (the dpif interface requires every op to get a result). */
static void
dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_op *new_ops[OPERATE_MAX_OPS]; /* Ops falling back to kernel. */
    int count = 0;                            /* Ops queued in 'new_ops'. */
    int i = 0;                                /* Next index into 'ops'. */
    int err = 0;

    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        VLOG_DBG("Invalid offload_type: %d", offload_type);
        return;
    }

    if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
        while (n_ops > 0) {
            count = 0;

            /* Try offload for at most one kernel batch worth of ops. */
            while (n_ops > 0 && count < OPERATE_MAX_OPS) {
                struct dpif_op *op = ops[i++];

                err = try_send_to_netdev(dpif, op);
                if (err && err != EEXIST) {
                    if (offload_type == DPIF_OFFLOAD_ALWAYS) {
                        /* We got an error while offloading an op. Since
                         * OFFLOAD_ALWAYS is specified, we stop further
                         * processing and return to the caller without
                         * invoking kernel datapath as fallback. But the
                         * interface requires us to process all n_ops; so
                         * return the same error in the remaining ops too.
                         */
                        op->error = err;
                        n_ops--;
                        while (n_ops > 0) {
                            op = ops[i++];
                            op->error = err;
                            n_ops--;
                        }
                        return;
                    }
                    /* Offload failed; queue for the kernel datapath. */
                    new_ops[count++] = op;
                } else {
                    /* Offloaded successfully (or EEXIST): done with it. */
                    op->error = err;
                }

                n_ops--;
            }

            dpif_netlink_operate_chunks(dpif, new_ops, count);
        }
    } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
        /* Offload disabled: everything goes straight to the kernel. */
        dpif_netlink_operate_chunks(dpif, ops, n_ops);
    }
}
2227
2228 #if _WIN32
2229 static void
2230 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2231 {
2232 vport_delete_sock_pool(handler);
2233 }
2234
2235 static int
2236 dpif_netlink_handler_init(struct dpif_handler *handler)
2237 {
2238 return vport_create_sock_pool(handler);
2239 }
2240 #else
2241
2242 static int
2243 dpif_netlink_handler_init(struct dpif_handler *handler)
2244 {
2245 handler->epoll_fd = epoll_create(10);
2246 return handler->epoll_fd < 0 ? errno : 0;
2247 }
2248
2249 static void
2250 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2251 {
2252 close(handler->epoll_fd);
2253 }
2254 #endif
2255
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * Also (re)creates the handler array if 'n_handlers' differs from the
 * current handler count.  Returns 0 on success, a positive errno on
 * failure.  Caller holds 'dpif->upcall_lock' for writing. */
static int
dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;
    struct dpif_netlink_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    ovs_assert(!WINDOWS || n_handlers <= 1);
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    /* Handler count changed: tear everything down and rebuild the array,
     * undoing partial initialization on failure. */
    if (dpif->n_handlers != n_handlers) {
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            int error;
            struct dpif_handler *handler = &dpif->handlers[i];

            error = dpif_netlink_handler_init(handler);
            if (error) {
                size_t j;

                /* Unwind the handlers initialized so far. */
                for (j = 0; j < i; j++) {
                    struct dpif_handler *tmp = &dpif->handlers[j];
                    dpif_netlink_handler_uninit(tmp);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return error;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    /* Discard any buffered-but-unprocessed epoll events. */
    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    /* 'keep_channels' marks ports whose existing channel is still backed by
     * a kernel vport; anything unmarked at the end is stale and removed. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_port_dump_start__(dpif, &dump);
    while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t upcall_pid;
        int error;

        /* No channel yet for this kernel vport: create a socket and
         * register it. */
        if (port_no >= dpif->uc_array_size
            || !vport_get_pid(dpif, port_no, &upcall_pid)) {
            struct nl_sock *socksp;
            error = create_nl_sock(dpif, &socksp);

            if (error) {
                goto error;
            }

            error = vport_add_channel(dpif, vport.port_no, socksp);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                nl_sock_destroy(socksp);
                retval = error;
                goto error;
            }
            upcall_pid = nl_sock_pid(socksp);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != 1
            || upcall_pid != vport.upcall_pids[0]) {
            struct dpif_netlink_vport vport_request;

            dpif_netlink_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = 1;
            vport_request.upcall_pids = &upcall_pid;
            error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is. Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        continue;

    error:
        /* Per-port failure: drop this port's channel but keep dumping. */
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
2386
2387 static int
2388 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
2389 OVS_REQ_WRLOCK(dpif->upcall_lock)
2390 {
2391 if ((dpif->handlers != NULL) == enable) {
2392 return 0;
2393 } else if (!enable) {
2394 destroy_all_channels(dpif);
2395 return 0;
2396 } else {
2397 return dpif_netlink_refresh_channels(dpif, 1);
2398 }
2399 }
2400
2401 static int
2402 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
2403 {
2404 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2405 int error;
2406
2407 fat_rwlock_wrlock(&dpif->upcall_lock);
2408 error = dpif_netlink_recv_set__(dpif, enable);
2409 fat_rwlock_unlock(&dpif->upcall_lock);
2410
2411 return error;
2412 }
2413
2414 static int
2415 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
2416 {
2417 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2418 int error = 0;
2419
2420 #ifdef _WIN32
2421 /* Multiple upcall handlers will be supported once kernel datapath supports
2422 * it. */
2423 if (n_handlers > 1) {
2424 return error;
2425 }
2426 #endif
2427
2428 fat_rwlock_wrlock(&dpif->upcall_lock);
2429 if (dpif->handlers) {
2430 error = dpif_netlink_refresh_channels(dpif, n_handlers);
2431 }
2432 fat_rwlock_unlock(&dpif->upcall_lock);
2433
2434 return error;
2435 }
2436
2437 static int
2438 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
2439 uint32_t queue_id, uint32_t *priority)
2440 {
2441 if (queue_id < 0xf000) {
2442 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
2443 return 0;
2444 } else {
2445 return EINVAL;
2446 }
2447 }
2448
/* Parses the OVS_PACKET genetlink (upcall) message in 'buf' into '*upcall'
 * and stores the datapath ifindex from the message header in '*dp_ifindex'.
 *
 * On success, every field of '*upcall' is (re)set.  'upcall->packet' aliases
 * the payload bytes still owned by 'buf', so 'buf' must outlive 'upcall'.
 * Returns 0 on success or EINVAL if the message is malformed. */
static int
parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
                 struct dpif_upcall *upcall, int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
        [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true }
    };

    /* Peel the netlink, genetlink, and ovs headers off a read-only view. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    /* Only MISS and ACTION upcalls are valid here. */
    int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
                : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
                : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
    upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
    upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
    upcall->mru = a[OVS_PACKET_ATTR_MRU];

    /* Allow overwriting the netlink attribute header without reallocating.
     * The stub starts one nlattr *before* the payload so there is headroom
     * to prepend data in place later. */
    dp_packet_use_stub(&upcall->packet,
                       CONST_CAST(struct nlattr *,
                                  nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                       nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                       sizeof(struct nlattr));
    dp_packet_set_data(&upcall->packet,
                       (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
    dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
        /* Ethernet frame */
        upcall->packet.packet_type = htonl(PT_ETH);
    } else {
        /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
        ovs_be16 ethertype = 0;
        const struct nlattr *et_nla = nl_attr_find__(upcall->key,
                                                     upcall->key_len,
                                                     OVS_KEY_ATTR_ETHERTYPE);
        if (et_nla) {
            ethertype = nl_attr_get_be16(et_nla);
        }
        upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                    ntohs(ethertype));
        dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
    }

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
2528
#ifdef _WIN32
#define PACKET_RECV_BATCH_SIZE 50
/* Windows upcall reception: polls each socket in the handler's vport socket
 * pool, reading at most PACKET_RECV_BATCH_SIZE messages per call.  Returns 0
 * with '*upcall' and 'buf' filled in, EAGAIN if nothing is pending, or
 * another positive errno on failure.  Caller holds the upcall read lock. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            /* Cap work per call so one busy socket cannot starve the
             * caller. */
            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    /* This socket is drained; move to the next one. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
#else
/* POSIX upcall reception: waits on the handler's epoll instance, then reads
 * from the ready per-port channels, at most 50 messages per call.  Returns 0
 * with '*upcall' and 'buf' filled in, EAGAIN if nothing is pending, or
 * another positive errno on failure.  Caller holds the upcall read lock. */
static int
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
                    struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        handler->event_offset = handler->n_events = 0;

        /* Non-blocking poll (timeout 0); blocking happens in recv_wait. */
        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* epoll user data carries the channel index for this event. */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Cap work per call so one busy channel cannot starve the
             * caller. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    /* This channel is drained; move to the next event. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
#endif
2671
2672 static int
2673 dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
2674 struct dpif_upcall *upcall, struct ofpbuf *buf)
2675 {
2676 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2677 int error;
2678
2679 fat_rwlock_rdlock(&dpif->upcall_lock);
2680 #ifdef _WIN32
2681 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
2682 #else
2683 error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
2684 #endif
2685 fat_rwlock_unlock(&dpif->upcall_lock);
2686
2687 return error;
2688 }
2689
2690 static void
2691 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2692 OVS_REQ_RDLOCK(dpif->upcall_lock)
2693 {
2694 #ifdef _WIN32
2695 uint32_t i;
2696 struct dpif_windows_vport_sock *sock_pool =
2697 dpif->handlers[handler_id].vport_sock_pool;
2698
2699 /* Only one handler is supported currently. */
2700 if (handler_id >= 1) {
2701 return;
2702 }
2703
2704 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2705 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2706 }
2707 #else
2708 if (dpif->handlers && handler_id < dpif->n_handlers) {
2709 struct dpif_handler *handler = &dpif->handlers[handler_id];
2710
2711 poll_fd_wait(handler->epoll_fd, POLLIN);
2712 }
2713 #endif
2714 }
2715
2716 static void
2717 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2718 {
2719 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2720
2721 fat_rwlock_rdlock(&dpif->upcall_lock);
2722 dpif_netlink_recv_wait__(dpif, handler_id);
2723 fat_rwlock_unlock(&dpif->upcall_lock);
2724 }
2725
/* Discards any queued upcall messages from every per-port netlink socket.
 * Caller holds 'dpif->upcall_lock' for writing. */
static void
dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (dpif->handlers) {
        size_t i;

        /* A null sock in slot 0 is treated as "no channels populated".
         * NOTE(review): assumes slot 0 is representative of the whole array
         * and that uc_array_size > 0 here -- confirm against the channel
         * allocation in vport_add_channel(). */
        if (!dpif->channels[0].sock) {
            return;
        }
        for (i = 0; i < dpif->uc_array_size; i++ ) {

            nl_sock_drain(dpif->channels[i].sock);
        }
    }
}
2742
2743 static void
2744 dpif_netlink_recv_purge(struct dpif *dpif_)
2745 {
2746 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2747
2748 fat_rwlock_wrlock(&dpif->upcall_lock);
2749 dpif_netlink_recv_purge__(dpif);
2750 fat_rwlock_unlock(&dpif->upcall_lock);
2751 }
2752
/* Returns the version string of the loaded openvswitch kernel module as a
 * freshly allocated string (caller frees), or NULL if it is unavailable
 * (non-Linux platform, module not loaded, or read failure). */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE  "/sys/module/openvswitch/version"
    FILE *file = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (file) {
        char line[MAX_VERSION_STR_SIZE];

        if (fgets(line, sizeof line, file)) {
            /* Strip the trailing newline, if present. */
            line[strcspn(line, "\n")] = '\0';
            version_str = xstrdup(line);
        }
        fclose(file);
    }
#endif

    return version_str;
}
2782
/* State threaded through the conntrack dump callbacks below. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;        /* Generic ct_dpif dump state. */
    struct nl_ct_dump_state *nl_ct_dump; /* Underlying netlink CT dump. */
};
2787
2788 static int
2789 dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
2790 struct ct_dpif_dump_state **dump_,
2791 const uint16_t *zone, int *ptot_bkts)
2792 {
2793 struct dpif_netlink_ct_dump_state *dump;
2794 int err;
2795
2796 dump = xzalloc(sizeof *dump);
2797 err = nl_ct_dump_start(&dump->nl_ct_dump, zone, ptot_bkts);
2798 if (err) {
2799 free(dump);
2800 return err;
2801 }
2802
2803 *dump_ = &dump->up;
2804
2805 return 0;
2806 }
2807
2808 static int
2809 dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
2810 struct ct_dpif_dump_state *dump_,
2811 struct ct_dpif_entry *entry)
2812 {
2813 struct dpif_netlink_ct_dump_state *dump;
2814
2815 INIT_CONTAINER(dump, dump_, up);
2816
2817 return nl_ct_dump_next(dump->nl_ct_dump, entry);
2818 }
2819
2820 static int
2821 dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
2822 struct ct_dpif_dump_state *dump_)
2823 {
2824 struct dpif_netlink_ct_dump_state *dump;
2825
2826 INIT_CONTAINER(dump, dump_, up);
2827
2828 int err = nl_ct_dump_done(dump->nl_ct_dump);
2829 free(dump);
2830 return err;
2831 }
2832
2833 static int
2834 dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone,
2835 const struct ct_dpif_tuple *tuple)
2836 {
2837 if (tuple) {
2838 return nl_ct_flush_tuple(tuple, zone ? *zone : 0);
2839 } else if (zone) {
2840 return nl_ct_flush_zone(*zone);
2841 } else {
2842 return nl_ct_flush();
2843 }
2844 }
2845
2846 static int
2847 dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED,
2848 const uint32_t *default_limits,
2849 const struct ovs_list *zone_limits)
2850 {
2851 struct ovs_zone_limit req_zone_limit;
2852
2853 if (ovs_ct_limit_family < 0) {
2854 return EOPNOTSUPP;
2855 }
2856
2857 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
2858 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
2859 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET,
2860 OVS_CT_LIMIT_VERSION);
2861
2862 struct ovs_header *ovs_header;
2863 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
2864 ovs_header->dp_ifindex = 0;
2865
2866 size_t opt_offset;
2867 opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
2868 if (default_limits) {
2869 req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
2870 req_zone_limit.limit = *default_limits;
2871 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2872 }
2873
2874 if (!ovs_list_is_empty(zone_limits)) {
2875 struct ct_dpif_zone_limit *zone_limit;
2876
2877 LIST_FOR_EACH (zone_limit, node, zone_limits) {
2878 req_zone_limit.zone_id = zone_limit->zone;
2879 req_zone_limit.limit = zone_limit->limit;
2880 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2881 }
2882 }
2883 nl_msg_end_nested(request, opt_offset);
2884
2885 int err = nl_transact(NETLINK_GENERIC, request, NULL);
2886 ofpbuf_delete(request);
2887 return err;
2888 }
2889
/* Parses an OVS_CT_LIMIT_CMD_GET reply in 'buf'.  Stores the default-zone
 * limit in '*default_limit' (if present in the reply) and appends every
 * per-zone limit to 'zone_limits'.  Returns 0 on success or EINVAL if the
 * message is malformed. */
static int
dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf,
                                     uint32_t *default_limit,
                                     struct ovs_list *zone_limits)
{
    static const struct nl_policy ovs_ct_limit_policy[] = {
        [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED,
                                           .optional = true },
    };

    /* Peel the netlink, genetlink, and ovs headers off a read-only view. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)];

    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_ct_limit_family
        || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr,
                            ARRAY_SIZE(ovs_ct_limit_policy))) {
        return EINVAL;
    }


    if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
        return EINVAL;
    }

    /* Walk the flat array of struct ovs_zone_limit inside the nested
     * attribute.  NOTE(review): 'rem' is a signed int compared against
     * sizeof below; fine while attribute sizes stay small, but size_t
     * would be cleaner -- confirm before changing. */
    int rem = NLA_ALIGN(
        nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]));
    const struct ovs_zone_limit *zone_limit =
        nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]);

    while (rem >= sizeof *zone_limit) {
        if (zone_limit->zone_id == OVS_ZONE_LIMIT_DEFAULT_ZONE) {
            *default_limit = zone_limit->limit;
        } else if (zone_limit->zone_id < OVS_ZONE_LIMIT_DEFAULT_ZONE ||
                   zone_limit->zone_id > UINT16_MAX) {
            /* Out-of-range zone id: skip silently. */
        } else {
            ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id,
                                    zone_limit->limit, zone_limit->count);
        }
        rem -= NLA_ALIGN(sizeof *zone_limit);
        zone_limit = ALIGNED_CAST(struct ovs_zone_limit *,
            (unsigned char *) zone_limit + NLA_ALIGN(sizeof *zone_limit));
    }
    return 0;
}
2939
2940 static int
2941 dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED,
2942 uint32_t *default_limit,
2943 const struct ovs_list *zone_limits_request,
2944 struct ovs_list *zone_limits_reply)
2945 {
2946 if (ovs_ct_limit_family < 0) {
2947 return EOPNOTSUPP;
2948 }
2949
2950 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
2951 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
2952 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET,
2953 OVS_CT_LIMIT_VERSION);
2954
2955 struct ovs_header *ovs_header;
2956 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
2957 ovs_header->dp_ifindex = 0;
2958
2959 if (!ovs_list_is_empty(zone_limits_request)) {
2960 size_t opt_offset = nl_msg_start_nested(request,
2961 OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
2962
2963 struct ovs_zone_limit req_zone_limit;
2964 req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
2965 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2966
2967 struct ct_dpif_zone_limit *zone_limit;
2968 LIST_FOR_EACH (zone_limit, node, zone_limits_request) {
2969 req_zone_limit.zone_id = zone_limit->zone;
2970 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2971 }
2972
2973 nl_msg_end_nested(request, opt_offset);
2974 }
2975
2976 struct ofpbuf *reply;
2977 int err = nl_transact(NETLINK_GENERIC, request, &reply);
2978 if (err) {
2979 goto out;
2980 }
2981
2982 err = dpif_netlink_zone_limits_from_ofpbuf(reply, default_limit,
2983 zone_limits_reply);
2984
2985 out:
2986 ofpbuf_delete(request);
2987 ofpbuf_delete(reply);
2988 return err;
2989 }
2990
2991 static int
2992 dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED,
2993 const struct ovs_list *zone_limits)
2994 {
2995 if (ovs_ct_limit_family < 0) {
2996 return EOPNOTSUPP;
2997 }
2998
2999 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
3000 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
3001 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL,
3002 OVS_CT_LIMIT_VERSION);
3003
3004 struct ovs_header *ovs_header;
3005 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
3006 ovs_header->dp_ifindex = 0;
3007
3008 if (!ovs_list_is_empty(zone_limits)) {
3009 size_t opt_offset =
3010 nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
3011
3012 struct ct_dpif_zone_limit *zone_limit;
3013 LIST_FOR_EACH (zone_limit, node, zone_limits) {
3014 struct ovs_zone_limit req_zone_limit;
3015 req_zone_limit.zone_id = zone_limit->zone;
3016 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
3017 }
3018 nl_msg_end_nested(request, opt_offset);
3019 }
3020
3021 int err = nl_transact(NETLINK_GENERIC, request, NULL);
3022
3023 ofpbuf_delete(request);
3024 return err;
3025 }
3026
/* Prefix for the kernel conntrack timeout-policy names that OVS installs. */
#define NL_TP_NAME_PREFIX "ovs_tp_"

/* One (address family, transport protocol) pair for which a kernel
 * conntrack timeout policy is maintained. */
struct dpif_netlink_timeout_policy_protocol {
    uint16_t l3num;     /* Address family (AF_INET or AF_INET6). */
    uint8_t l4num;      /* IP protocol number (IPPROTO_*). */
};

/* Indexes into the tp_protos[] table below. */
enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
    DPIF_NL_TP_AF_INET_TCP,
    DPIF_NL_TP_AF_INET_UDP,
    DPIF_NL_TP_AF_INET_ICMP,
    DPIF_NL_TP_AF_INET6_TCP,
    DPIF_NL_TP_AF_INET6_UDP,
    DPIF_NL_TP_AF_INET6_ICMPV6,
    DPIF_NL_TP_MAX
};

/* Bitmap with one bit set for each supported protocol above. */
#define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)


/* Every (l3, l4) combination for which a kernel timeout policy is
 * created/updated/deleted as a unit. */
static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
    [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
    [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
                                     .l4num = IPPROTO_ICMPV6 },
};
3056
3057 static void
3058 dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
3059 struct ds *tp_name)
3060 {
3061 ds_clear(tp_name);
3062 ds_put_format(tp_name, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
3063 ct_dpif_format_ipproto(tp_name, l4num);
3064
3065 if (l3num == AF_INET) {
3066 ds_put_cstr(tp_name, "4");
3067 } else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
3068 ds_put_cstr(tp_name, "6");
3069 }
3070
3071 ovs_assert(tp_name->length < CTNL_TIMEOUT_NAME_MAX);
3072 }
3073
/* Tables mapping ct_dpif timeout-policy attributes (CT_DPIF_TP_ATTR_*) to
 * the kernel's nfnetlink timeout attributes (CTA_TIMEOUT_*), one X-macro
 * list per L4 protocol.  Each entry expands via CT_DPIF_NL_TP_MAPPING. */
#define CT_DPIF_NL_TP_TCP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE)                   \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS)            \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)

#define CT_DPIF_NL_TP_UDP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED)              \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)

#define CT_DPIF_NL_TP_ICMP_MAPPINGS                                 \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)

#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS                               \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)


/* Expansion used by the dpif_netlink_get_nl_tp_*_attrs() helpers below:
 * copies each attribute present in 'tp' (ct_dpif form) into 'nl_tp'
 * (netlink form), setting the matching bit in nl_tp->present. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)         \
if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {      \
    nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2;          \
    nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] =                  \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1];              \
}

/* Copies the TCP timeout attributes from 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Copies the UDP timeout attributes from 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Copies the ICMP timeout attributes from 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
                                  struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Copies the ICMPv6 timeout attributes from 'tp' into 'nl_tp'. */
static void
dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
                                    struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
3134
3135 static void
3136 dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp,
3137 uint8_t l4num, struct nl_ct_timeout_policy *nl_tp)
3138 {
3139 nl_tp->present = 0;
3140
3141 if (l4num == IPPROTO_TCP) {
3142 dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp);
3143 } else if (l4num == IPPROTO_UDP) {
3144 dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp);
3145 } else if (l4num == IPPROTO_ICMP) {
3146 dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp);
3147 } else if (l4num == IPPROTO_ICMPV6) {
3148 dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp);
3149 }
3150 }
3151
/* Reverse expansion used by the dpif_netlink_set_ct_dpif_tp_*_attrs()
 * helpers below: copies each attribute present in 'nl_tp' (netlink form)
 * into 'tp' (ct_dpif form).  When 'tp' already holds a different value for
 * the same attribute, it is kept and the inconsistency is logged. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)                 \
if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) {               \
    if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {          \
        if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] !=                \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) {                 \
            VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s "       \
                         "attribute %s=%"PRIu32" while %s=%"PRIu32,         \
                         nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2,       \
                         nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2],      \
                         "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1,                \
                         tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]);    \
        }                                                                   \
    } else {                                                                \
        tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1;             \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] =                     \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2];                   \
    }                                                                       \
}

/* Copies the TCP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Copies the UDP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Copies the ICMP timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_icmp_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Copies the ICMPv6 timeout attributes from 'nl_tp' into 'tp'. */
static void
dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
3202
3203 static void
3204 dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp,
3205 struct ct_dpif_timeout_policy *tp)
3206 {
3207 if (nl_tp->l4num == IPPROTO_TCP) {
3208 dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp);
3209 } else if (nl_tp->l4num == IPPROTO_UDP) {
3210 dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp);
3211 } else if (nl_tp->l4num == IPPROTO_ICMP) {
3212 dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp);
3213 } else if (nl_tp->l4num == IPPROTO_ICMPV6) {
3214 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp);
3215 }
3216 }
3217
#ifdef _WIN32
/* The Windows datapath does not support conntrack timeout policies, so
 * every operation below is a stub that reports EOPNOTSUPP. */
static int
dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   const struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id,
                                   struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
                                          void **statep)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy **tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
                                         void *state)
{
    return EOPNOTSUPP;
}
3262 #else
/* Installs timeout policy 'tp' in the kernel by creating one kernel
 * timeout policy for each supported L3/L4 protocol combination listed in
 * 'tp_protos'.  Returns 0 on success, or a positive errno value on the
 * first failure; sub-policies installed before the failure are left in
 * place.
 *
 * NOTE(review): assumes dpif_netlink_format_tp_name() resets
 * 'nl_tp_name' before formatting on each iteration -- confirm. */
static int
dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   const struct ct_dpif_timeout_policy *tp)
{
    struct nl_ct_timeout_policy nl_tp;
    struct ds nl_tp_name = DS_EMPTY_INITIALIZER;
    int err = 0;

    for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
        dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num,
                                    tp_protos[i].l4num, &nl_tp_name);
        ovs_strlcpy(nl_tp.name, ds_cstr(&nl_tp_name), sizeof nl_tp.name);
        nl_tp.l3num = tp_protos[i].l3num;
        nl_tp.l4num = tp_protos[i].l4num;
        /* Translate the dpif-layer attributes into kernel attributes for
         * this particular L4 protocol. */
        dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp);
        err = nl_ct_set_timeout_policy(&nl_tp);
        if (err) {
            VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)",
                         nl_tp.name, ovs_strerror(err));
            goto out;
        }
    }

out:
    ds_destroy(&nl_tp_name);
    return err;
}
3290
3291 static int
3292 dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
3293 uint32_t tp_id,
3294 struct ct_dpif_timeout_policy *tp)
3295 {
3296 struct nl_ct_timeout_policy nl_tp;
3297 struct ds nl_tp_name = DS_EMPTY_INITIALIZER;
3298 int err = 0;
3299
3300 tp->id = tp_id;
3301 tp->present = 0;
3302 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3303 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3304 tp_protos[i].l4num, &nl_tp_name);
3305 err = nl_ct_get_timeout_policy(ds_cstr(&nl_tp_name), &nl_tp);
3306
3307 if (err) {
3308 VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)",
3309 nl_tp.name, ovs_strerror(err));
3310 goto out;
3311 }
3312 dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp);
3313 }
3314
3315 out:
3316 ds_destroy(&nl_tp_name);
3317 return err;
3318 }
3319
3320 /* Returns 0 if all the sub timeout policies are deleted or not exist in the
3321 * kernel. Returns 1 if any sub timeout policy deletion failed. */
3322 static int
3323 dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
3324 uint32_t tp_id)
3325 {
3326 struct ds nl_tp_name = DS_EMPTY_INITIALIZER;
3327 int ret = 0;
3328
3329 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3330 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3331 tp_protos[i].l4num, &nl_tp_name);
3332 int err = nl_ct_del_timeout_policy(ds_cstr(&nl_tp_name));
3333 if (err == ENOENT) {
3334 err = 0;
3335 }
3336 if (err) {
3337 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6);
3338 VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)",
3339 ds_cstr(&nl_tp_name), ovs_strerror(err));
3340 ret = 1;
3341 }
3342 }
3343
3344 ds_destroy(&nl_tp_name);
3345 return ret;
3346 }
3347
/* State for an in-progress timeout-policy dump: the underlying kernel
 * netlink dump plus a map that accumulates per-protocol sub-policies
 * until a whole policy can be returned. */
struct dpif_netlink_ct_timeout_policy_dump_state {
    struct nl_ct_timeout_policy_dump_state *nl_dump_state; /* Kernel dump. */
    struct hmap tp_dump_map;    /* Partially assembled policies, hashed
                                 * by policy id. */
};
3352
/* One partially assembled timeout policy during a dump. */
struct dpif_netlink_tp_dump_node {
    struct hmap_node hmap_node;      /* node in tp_dump_map. */
    struct ct_dpif_timeout_policy *tp;  /* Heap-allocated, owned here. */
    uint32_t l3_l4_present;          /* Bitmap of tp_protos[] indexes seen;
                                      * policy is complete when it equals
                                      * DPIF_NL_ALL_TP. */
};
3358
3359 static struct dpif_netlink_tp_dump_node *
3360 get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id,
3361 struct hmap *tp_dump_map)
3362 {
3363 struct dpif_netlink_tp_dump_node *tp_dump_node;
3364
3365 HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0),
3366 tp_dump_map) {
3367 if (tp_dump_node->tp->id == tp_id) {
3368 return tp_dump_node;
3369 }
3370 }
3371 return NULL;
3372 }
3373
3374 static void
3375 update_dpif_netlink_tp_dump_node(
3376 const struct nl_ct_timeout_policy *nl_tp,
3377 struct dpif_netlink_tp_dump_node *tp_dump_node)
3378 {
3379 dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp);
3380 for (int i = 0; i < DPIF_NL_TP_MAX; ++i) {
3381 if (nl_tp->l3num == tp_protos[i].l3num &&
3382 nl_tp->l4num == tp_protos[i].l4num) {
3383 tp_dump_node->l3_l4_present |= 1 << i;
3384 break;
3385 }
3386 }
3387 }
3388
3389 static int
3390 dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
3391 void **statep)
3392 {
3393 struct dpif_netlink_ct_timeout_policy_dump_state *dump_state;
3394
3395 *statep = dump_state = xzalloc(sizeof *dump_state);
3396 int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state);
3397 if (err) {
3398 free(dump_state);
3399 return err;
3400 }
3401 hmap_init(&dump_state->tp_dump_map);
3402 return 0;
3403 }
3404
3405 static void
3406 get_and_cleanup_tp_dump_node(struct hmap *hmap,
3407 struct dpif_netlink_tp_dump_node *tp_dump_node,
3408 struct ct_dpif_timeout_policy *tp)
3409 {
3410 hmap_remove(hmap, &tp_dump_node->hmap_node);
3411 *tp = *tp_dump_node->tp;
3412 free(tp_dump_node->tp);
3413 free(tp_dump_node);
3414 }
3415
/* Produces the next complete timeout policy from the dump in '*tp'.
 * Returns 0 when a policy is produced, EOF when the dump is exhausted,
 * or another positive errno value on error.  Because each OVS policy is
 * stored in the kernel as several per-protocol sub-policies, sub-pieces
 * are accumulated in dump_state->tp_dump_map until one policy has all of
 * its L3/L4 pieces. */
static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy *tp)
{
    struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
    struct dpif_netlink_tp_dump_node *tp_dump_node;
    int err;

    /* Dumps all the timeout policies in the kernel. */
    do {
        struct nl_ct_timeout_policy nl_tp;
        uint32_t tp_id;

        err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state,
                                             &nl_tp);
        if (err) {
            break;
        }

        /* We are only interested in OVS-installed timeout policies, whose
         * kernel names start with NL_TP_NAME_PREFIX. */
        if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) {
            continue;
        }

        tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id(
                            tp_id, &dump_state->tp_dump_map);
        if (!tp_dump_node) {
            /* First sub-policy seen for this id: start assembling it. */
            tp_dump_node = xzalloc(sizeof *tp_dump_node);
            tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp);
            tp_dump_node->tp->id = tp_id;
            hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node,
                        hash_int(tp_id, 0));
        }

        update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node);

        /* Returns one ct_dpif_timeout_policy if we gather all the L3/L4
         * sub-pieces. */
        if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) {
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            break;
        }
    } while (true);

    /* Dump the incomplete timeout policies. */
    if (err == EOF) {
        if (!hmap_is_empty(&dump_state->tp_dump_map)) {
            struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map);
            tp_dump_node = CONTAINER_OF(hmap_node,
                                        struct dpif_netlink_tp_dump_node,
                                        hmap_node);
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            return 0;
        }
    }

    return err;
}
3477
3478 static int
3479 dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
3480 void *state)
3481 {
3482 struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
3483 struct dpif_netlink_tp_dump_node *tp_dump_node;
3484
3485 int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state);
3486 HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) {
3487 free(tp_dump_node->tp);
3488 free(tp_dump_node);
3489 }
3490 hmap_destroy(&dump_state->tp_dump_map);
3491 free(dump_state);
3492 return err;
3493 }
3494 #endif
3495
3496 \f
3497 /* Meters */
3498
3499 /* Set of supported meter flags */
3500 #define DP_SUPPORTED_METER_FLAGS_MASK \
3501 (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)
3502
3503 /* Meter support was introduced in Linux 4.15. In some versions of
3504 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
3505 * when the meter was created, so all meters essentially had an id of
3506 * zero. Check for that condition and disable meters on those kernels. */
3507 static bool probe_broken_meters(struct dpif *);
3508
3509 static void
3510 dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf,
3511 void *stub, size_t size, uint32_t command)
3512 {
3513 ofpbuf_use_stub(buf, stub, size);
3514
3515 nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO,
3516 command, OVS_METER_VERSION);
3517
3518 struct ovs_header *ovs_header;
3519 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
3520 ovs_header->dp_ifindex = dpif->dp_ifindex;
3521 }
3522
3523 /* Execute meter 'request' in the kernel datapath. If the command
3524 * fails, returns a positive errno value. Otherwise, stores the reply
3525 * in '*replyp', parses the policy according to 'reply_policy' into the
3526 * array of Netlink attribute in 'a', and returns 0. On success, the
3527 * caller is responsible for calling ofpbuf_delete() on '*replyp'
3528 * ('replyp' will contain pointers into 'a'). */
3529 static int
3530 dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp,
3531 const struct nl_policy *reply_policy,
3532 struct nlattr **a, size_t size_a)
3533 {
3534 int error = nl_transact(NETLINK_GENERIC, request, replyp);
3535 ofpbuf_uninit(request);
3536
3537 if (error) {
3538 return error;
3539 }
3540
3541 struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg);
3542 struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl);
3543 struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp,
3544 sizeof *ovs_header);
3545 if (!nlmsg || !genl || !ovs_header
3546 || nlmsg->nlmsg_type != ovs_meter_family
3547 || !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) {
3548 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3549 VLOG_DBG_RL(&rl,
3550 "Kernel module response to meter tranaction is invalid");
3551 return EINVAL;
3552 }
3553 return 0;
3554 }
3555
3556 static void
3557 dpif_netlink_meter_get_features(const struct dpif *dpif_,
3558 struct ofputil_meter_features *features)
3559 {
3560 if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) {
3561 features = NULL;
3562 return;
3563 }
3564
3565 struct ofpbuf buf, *msg;
3566 uint64_t stub[1024 / 8];
3567
3568 static const struct nl_policy ovs_meter_features_policy[] = {
3569 [OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 },
3570 [OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 },
3571 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3572 };
3573 struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)];
3574
3575 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3576 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub,
3577 OVS_METER_CMD_FEATURES);
3578 if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a,
3579 ARRAY_SIZE(ovs_meter_features_policy))) {
3580 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3581 VLOG_INFO_RL(&rl,
3582 "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
3583 return;
3584 }
3585
3586 features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]);
3587 features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]);
3588
3589 /* Bands is a nested attribute of zero or more nested
3590 * band attributes. */
3591 if (a[OVS_METER_ATTR_BANDS]) {
3592 const struct nlattr *nla;
3593 size_t left;
3594
3595 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3596 const struct nlattr *band_nla;
3597 size_t band_left;
3598
3599 NL_NESTED_FOR_EACH (band_nla, band_left, nla) {
3600 if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) {
3601 if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) {
3602 switch (nl_attr_get_u32(band_nla)) {
3603 case OVS_METER_BAND_TYPE_DROP:
3604 features->band_types |= 1 << OFPMBT13_DROP;
3605 break;
3606 }
3607 }
3608 }
3609 }
3610 }
3611 }
3612 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
3613
3614 ofpbuf_delete(msg);
3615 }
3616
/* Installs or modifies meter 'meter_id' with 'config' in the kernel
 * datapath.  Returns 0 on success, EBADF for unsupported flags, ENODEV
 * for unsupported band types, or another positive errno value on
 * transaction failure. */
static int
dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id,
                         struct ofputil_meter_config *config)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct ofpbuf buf, *msg;
    uint64_t stub[1024 / 8];

    static const struct nl_policy ovs_meter_set_response_policy[] = {
        [OVS_METER_ATTR_ID] = { .type = NL_A_U32 },
    };
    struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)];

    if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) {
        return EBADF; /* Unsupported flags set */
    }

    /* Validate all bands before serializing anything. */
    for (size_t i = 0; i < config->n_bands; i++) {
        switch (config->bands[i].type) {
        case OFPMBT13_DROP:
            break;
        default:
            return ENODEV; /* Unsupported band type */
        }
    }

    dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET);

    nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);

    if (config->flags & OFPMF13_KBPS) {
        nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS);
    }

    size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS);
    /* Bands */
    for (size_t i = 0; i < config->n_bands; ++i) {
        struct ofputil_meter_band * band = &config->bands[i];
        uint32_t band_type;

        size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC);

        switch (band->type) {
        case OFPMBT13_DROP:
            band_type = OVS_METER_BAND_TYPE_DROP;
            break;
        default:
            band_type = OVS_METER_BAND_TYPE_UNSPEC;
        }
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type);
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate);
        /* Without OFPMF13_BURST, the burst defaults to the rate. */
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST,
                       config->flags & OFPMF13_BURST ?
                       band->burst_size : band->rate);
        nl_msg_end_nested(&buf, band_offset);
    }
    nl_msg_end_nested(&buf, bands_offset);

    int error = dpif_netlink_meter_transact(&buf, &msg,
                                            ovs_meter_set_response_policy, a,
                                            ARRAY_SIZE(ovs_meter_set_response_policy));
    if (error) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
        return error;
    }

    /* The kernel echoes back the meter id it actually used; a mismatch
     * is logged but not treated as an error. */
    if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "Kernel returned a different meter id than requested");
    }
    ofpbuf_delete(msg);
    return 0;
}
3693
3694 static int
3695 dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id,
3696 struct ofputil_meter_config *config)
3697 {
3698 if (probe_broken_meters(dpif_)) {
3699 return ENOMEM;
3700 }
3701
3702 return dpif_netlink_meter_set__(dpif_, meter_id, config);
3703 }
3704
3705 /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
3706 * stored in 'stats', if it is not null. If 'command' is
3707 * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
3708 * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
3709 * simply retrieved. */
3710 static int
3711 dpif_netlink_meter_get_stats(const struct dpif *dpif_,
3712 ofproto_meter_id meter_id,
3713 struct ofputil_meter_stats *stats,
3714 uint16_t max_bands,
3715 enum ovs_meter_cmd command)
3716 {
3717 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3718 struct ofpbuf buf, *msg;
3719 uint64_t stub[1024 / 8];
3720
3721 static const struct nl_policy ovs_meter_stats_policy[] = {
3722 [OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true},
3723 [OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
3724 .optional = true},
3725 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3726 };
3727 struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)];
3728
3729 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command);
3730
3731 nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
3732
3733 int error = dpif_netlink_meter_transact(&buf, &msg,
3734 ovs_meter_stats_policy, a,
3735 ARRAY_SIZE(ovs_meter_stats_policy));
3736 if (error) {
3737 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3738 VLOG_INFO_RL(&rl, "dpif_netlink_meter_transact %s failed",
3739 command == OVS_METER_CMD_GET ? "get" : "del");
3740 return error;
3741 }
3742
3743 if (stats
3744 && a[OVS_METER_ATTR_ID]
3745 && a[OVS_METER_ATTR_STATS]
3746 && nl_attr_get_u32(a[OVS_METER_ATTR_ID]) == meter_id.uint32) {
3747 /* return stats */
3748 const struct ovs_flow_stats *stat;
3749 const struct nlattr *nla;
3750 size_t left;
3751
3752 stat = nl_attr_get(a[OVS_METER_ATTR_STATS]);
3753 stats->packet_in_count = get_32aligned_u64(&stat->n_packets);
3754 stats->byte_in_count = get_32aligned_u64(&stat->n_bytes);
3755
3756 if (a[OVS_METER_ATTR_BANDS]) {
3757 size_t n_bands = 0;
3758 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3759 const struct nlattr *band_nla;
3760 band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS);
3761 if (band_nla && nl_attr_get_size(band_nla) \
3762 == sizeof(struct ovs_flow_stats)) {
3763 stat = nl_attr_get(band_nla);
3764
3765 if (n_bands < max_bands) {
3766 stats->bands[n_bands].packet_count
3767 = get_32aligned_u64(&stat->n_packets);
3768 stats->bands[n_bands].byte_count
3769 = get_32aligned_u64(&stat->n_bytes);
3770 ++n_bands;
3771 }
3772 } else {
3773 stats->bands[n_bands].packet_count = 0;
3774 stats->bands[n_bands].byte_count = 0;
3775 ++n_bands;
3776 }
3777 }
3778 stats->n_bands = n_bands;
3779 } else {
3780 /* For a non-existent meter, return 0 stats. */
3781 stats->n_bands = 0;
3782 }
3783 }
3784
3785 ofpbuf_delete(msg);
3786 return error;
3787 }
3788
/* Retrieves statistics for meter 'meter_id' without deleting it. */
static int
dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
                       struct ofputil_meter_stats *stats, uint16_t max_bands)
{
    return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
                                        OVS_METER_CMD_GET);
}
3796
/* Deletes meter 'meter_id', optionally retrieving its final statistics
 * into 'stats' if nonnull. */
static int
dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
                       struct ofputil_meter_stats *stats, uint16_t max_bands)
{
    return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
                                        OVS_METER_CMD_DEL);
}
3804
/* Returns true if the kernel's meter implementation is broken (meters
 * created with id 0 regardless of the requested id -- see the comment
 * above probe_broken_meters()), false otherwise. */
static bool
probe_broken_meters__(struct dpif *dpif)
{
    /* This test is destructive if a probe occurs while ovs-vswitchd is
     * running (e.g., an ovs-dpctl meter command is called), so choose a
     * random high meter id to make this less likely to occur. */
    ofproto_meter_id id1 = { 54545401 };
    ofproto_meter_id id2 = { 54545402 };
    struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
    struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
    struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};

    /* Try adding two meters and make sure that they both come back with
     * the proper meter id.  Use the "__" version so that we don't cause
     * a recursive deadlock.
     * NOTE(review): the return values of these set calls are deliberately
     * ignored; failure is detected by the get calls below. */
    dpif_netlink_meter_set__(dpif, id1, &config1);
    dpif_netlink_meter_set__(dpif, id2, &config2);

    if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
        || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
        VLOG_INFO("The kernel module has a broken meter implementation.");
        return true;
    }

    /* Clean up the probe meters on healthy kernels. */
    dpif_netlink_meter_del(dpif, id1, NULL, 0);
    dpif_netlink_meter_del(dpif, id2, NULL, 0);

    return false;
}
3834
/* Memoized wrapper around probe_broken_meters__(): the probe runs at
 * most once per process and its result is cached. */
static bool
probe_broken_meters(struct dpif *dpif)
{
    /* This is a once-only test because currently OVS only has at most a single
     * Netlink capable datapath on any given platform. */
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* Written only inside the once-block; read-only afterwards. */
    static bool broken_meters = false;
    if (ovsthread_once_start(&once)) {
        broken_meters = probe_broken_meters__(dpif);
        ovsthread_once_done(&once);
    }
    return broken_meters;
}
3849 \f
/* Datapath interface class for "system" datapaths, i.e. datapaths
 * implemented by the Open vSwitch kernel module and driven over Generic
 * Netlink.  Entries marked NULL are features this dpif does not
 * implement. */
const struct dpif_class dpif_netlink_class = {
    "system",
    false,                          /* cleanup_required */
    NULL,                           /* init */
    dpif_netlink_enumerate,
    NULL,
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                           /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                           /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    NULL,                           /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                           /* register_dp_purge_cb */
    NULL,                           /* register_upcall_cb */
    NULL,                           /* enable_upcall */
    NULL,                           /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    dpif_netlink_ct_flush,
    NULL,                           /* ct_set_maxconns */
    NULL,                           /* ct_get_maxconns */
    NULL,                           /* ct_get_nconns */
    NULL,                           /* ct_set_tcp_seq_chk */
    NULL,                           /* ct_get_tcp_seq_chk */
    dpif_netlink_ct_set_limits,
    dpif_netlink_ct_get_limits,
    dpif_netlink_ct_del_limits,
    dpif_netlink_ct_set_timeout_policy,
    dpif_netlink_ct_get_timeout_policy,
    dpif_netlink_ct_del_timeout_policy,
    dpif_netlink_ct_timeout_policy_dump_start,
    dpif_netlink_ct_timeout_policy_dump_next,
    dpif_netlink_ct_timeout_policy_dump_done,
    NULL,                           /* ipf_set_enabled */
    NULL,                           /* ipf_set_min_frag */
    NULL,                           /* ipf_set_max_nfrags */
    NULL,                           /* ipf_get_status */
    NULL,                           /* ipf_dump_start */
    NULL,                           /* ipf_dump_next */
    NULL,                           /* ipf_dump_done */
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
};
3922
3923 static int
3924 dpif_netlink_init(void)
3925 {
3926 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3927 static int error;
3928
3929 if (ovsthread_once_start(&once)) {
3930 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
3931 &ovs_datapath_family);
3932 if (error) {
3933 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3934 "The Open vSwitch kernel module is probably not loaded.",
3935 OVS_DATAPATH_FAMILY);
3936 }
3937 if (!error) {
3938 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
3939 }
3940 if (!error) {
3941 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
3942 }
3943 if (!error) {
3944 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
3945 &ovs_packet_family);
3946 }
3947 if (!error) {
3948 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
3949 &ovs_vport_mcgroup);
3950 }
3951 if (!error) {
3952 if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) {
3953 VLOG_INFO("The kernel module does not support meters.");
3954 }
3955 }
3956 if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY,
3957 &ovs_ct_limit_family) < 0) {
3958 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3959 "Please update the Open vSwitch kernel module to enable "
3960 "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY);
3961 }
3962
3963 ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();
3964
3965 ovsthread_once_done(&once);
3966 }
3967
3968 return error;
3969 }
3970
/* Returns true if the kernel vport named 'name' is an OVS internal
 * device, false otherwise (including when the vport does not exist). */
bool
dpif_netlink_is_internal_device(const char *name)
{
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_vport_get(name, &reply, &buf);
    if (!error) {
        ofpbuf_delete(buf);
    } else if (error != ENODEV && error != ENOENT) {
        /* ENODEV/ENOENT simply mean "no such vport" and are not worth
         * logging. */
        VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
                     name, ovs_strerror(error));
    }

    /* Safe even on error: dpif_netlink_vport_transact() initializes
     * 'reply' (zeroing 'type') on every failure path. */
    return reply.type == OVS_VPORT_TYPE_INTERNAL;
}
3988
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'vport'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'vport' will contain pointers into 'buf', so the caller should not free
 * 'buf' while 'vport' is still in use. */
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NL_A_U32, .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Pull the fixed-size netlink, genetlink, and OVS headers, then
     * validate the message type and attribute layout. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* The attribute payload is an array of upcall PIDs. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);

    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    if (a[OVS_VPORT_ATTR_NETNSID]) {
        netnsid_set(&vport->netnsid,
                    nl_attr_get_u32(a[OVS_VPORT_ATTR_NETNSID]));
    } else {
        /* No netnsid attribute means the vport is in the local netns. */
        netnsid_set_local(&vport->netnsid);
    }
    return 0;
}
4051
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'vport'.  Only fields
 * that are set in 'vport' are serialized. */
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        /* Serialized as a flat array of n_upcall_pids PIDs. */
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
4094
4095 /* Clears 'vport' to "empty" values. */
4096 void
4097 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
4098 {
4099 memset(vport, 0, sizeof *vport);
4100 vport->port_no = ODPP_NONE;
4101 }
4102
/* Executes 'request' in the kernel datapath.  If the command fails, returns a
 * positive errno value.  Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else.  If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be an ovs_vport also, which is decoded
 * and stored in '*reply' and '*bufp'.  The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp').
 *
 * On every failure path, '*reply' is re-initialized to empty values and
 * '*bufp' is set to NULL, so callers may read 'reply' fields safely even
 * after an error. */
int
dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
                            struct dpif_netlink_vport *reply,
                            struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    /* Lazily resolve the netlink families on first use. */
    error = dpif_netlink_init();
    if (error) {
        if (reply) {
            *bufp = NULL;
            dpif_netlink_vport_init(reply);
        }
        return error;
    }

    request_buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        if (!error) {
            error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            dpif_netlink_vport_init(reply);
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
4145
/* Obtains information about the kernel vport named 'name' and stores it into
 * '*reply' and '*bufp'.  The caller must free '*bufp' when the reply is no
 * longer needed ('reply' will contain pointers into '*bufp').  Returns 0 on
 * success or a positive errno value on failure. */
int
dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
                       struct ofpbuf **bufp)
{
    struct dpif_netlink_vport request;

    /* Build a GET request identifying the vport by name only. */
    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.name = name;

    return dpif_netlink_vport_transact(&request, reply, bufp);
}
4161
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'dp'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'dp' is still in use. */
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                        NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                        .optional = true },
    };

    dpif_netlink_dp_init(dp);

    /* Pull the fixed-size netlink, genetlink, and OVS headers, then
     * validate the message type and attribute layout. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    return 0;
}
4208
4209 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
4210 static void
4211 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
4212 {
4213 struct ovs_header *ovs_header;
4214
4215 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
4216 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
4217 OVS_DATAPATH_VERSION);
4218
4219 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4220 ovs_header->dp_ifindex = dp->dp_ifindex;
4221
4222 if (dp->name) {
4223 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
4224 }
4225
4226 if (dp->upcall_pid) {
4227 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
4228 }
4229
4230 if (dp->user_features) {
4231 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
4232 }
4233
4234 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
4235 }
4236
4237 /* Clears 'dp' to "empty" values. */
4238 static void
4239 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
4240 {
4241 memset(dp, 0, sizeof *dp);
4242 }
4243
4244 static void
4245 dpif_netlink_dp_dump_start(struct nl_dump *dump)
4246 {
4247 struct dpif_netlink_dp request;
4248 struct ofpbuf *buf;
4249
4250 dpif_netlink_dp_init(&request);
4251 request.cmd = OVS_DP_CMD_GET;
4252
4253 buf = ofpbuf_new(1024);
4254 dpif_netlink_dp_to_ofpbuf(&request, buf);
4255 nl_dump_start(dump, NETLINK_GENERIC, buf);
4256 ofpbuf_delete(buf);
4257 }
4258
4259 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4260 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4261 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4262 * result of the command is expected to be of the same form, which is decoded
4263 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
4264 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
4265 static int
4266 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
4267 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
4268 {
4269 struct ofpbuf *request_buf;
4270 int error;
4271
4272 ovs_assert((reply != NULL) == (bufp != NULL));
4273
4274 request_buf = ofpbuf_new(1024);
4275 dpif_netlink_dp_to_ofpbuf(request, request_buf);
4276 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4277 ofpbuf_delete(request_buf);
4278
4279 if (reply) {
4280 dpif_netlink_dp_init(reply);
4281 if (!error) {
4282 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
4283 }
4284 if (error) {
4285 ofpbuf_delete(*bufp);
4286 *bufp = NULL;
4287 }
4288 }
4289 return error;
4290 }
4291
4292 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
4293 * The caller must free '*bufp' when the reply is no longer needed ('reply'
4294 * will contain pointers into '*bufp'). */
4295 static int
4296 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
4297 struct ofpbuf **bufp)
4298 {
4299 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
4300 struct dpif_netlink_dp request;
4301
4302 dpif_netlink_dp_init(&request);
4303 request.cmd = OVS_DP_CMD_GET;
4304 request.dp_ifindex = dpif->dp_ifindex;
4305
4306 return dpif_netlink_dp_transact(&request, reply, bufp);
4307 }
4308
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'flow' is still in use. */
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    /* Attribute policy for OVS_FLOW_CMD_* replies.  Every attribute is
     * optional because the kernel may omit any of them, e.g. in terse
     * (UFID-only) replies. */
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    dpif_netlink_flow_init(flow);

    /* Pull off the Netlink, Generic Netlink, and OVS headers in order; any
     * of the pulls returns NULL if 'buf' is too short. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A usable reply must identify the flow by at least one of its key or
     * its UFID. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
4381
4382
/*
 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
 * OVS_KEY_ATTR_ETHERTYPE. Puts 'data' to 'buf'.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* Split 'data' into the bytes before PACKET_TYPE and the bytes after
         * it; both chunks get copied below, skipping PACKET_TYPE itself. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
            - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: derive an ETHERTYPE value from the packet
             * type's namespace type. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            /* Search only the copy just written into 'buf' (starting past
             * the nested attribute's own header at 'ofs'). */
            nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                /* Patch the already-copied ETHERTYPE in place.  The copy in
                 * 'buf' is writable even though nl_attr_get() returns a
                 * const pointer, hence the CONST_CAST. */
                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        /* No PACKET_TYPE present: copy 'data' through unchanged. */
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
4428
4429 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
4430 * followed by Netlink attributes corresponding to 'flow'. */
4431 static void
4432 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
4433 struct ofpbuf *buf)
4434 {
4435 struct ovs_header *ovs_header;
4436
4437 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
4438 NLM_F_REQUEST | flow->nlmsg_flags,
4439 flow->cmd, OVS_FLOW_VERSION);
4440
4441 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4442 ovs_header->dp_ifindex = flow->dp_ifindex;
4443
4444 if (flow->ufid_present) {
4445 nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid);
4446 }
4447 if (flow->ufid_terse) {
4448 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
4449 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
4450 | OVS_UFID_F_OMIT_ACTIONS);
4451 }
4452 if (!flow->ufid_terse || !flow->ufid_present) {
4453 if (flow->key_len) {
4454 put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key,
4455 flow->key_len);
4456 }
4457 if (flow->mask_len) {
4458 put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask,
4459 flow->mask_len);
4460 }
4461 if (flow->actions || flow->actions_len) {
4462 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
4463 flow->actions, flow->actions_len);
4464 }
4465 }
4466
4467 /* We never need to send these to the kernel. */
4468 ovs_assert(!flow->stats);
4469 ovs_assert(!flow->tcp_flags);
4470 ovs_assert(!flow->used);
4471
4472 if (flow->clear) {
4473 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
4474 }
4475 if (flow->probe) {
4476 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
4477 }
4478 }
4479
4480 /* Clears 'flow' to "empty" values. */
4481 static void
4482 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
4483 {
4484 memset(flow, 0, sizeof *flow);
4485 }
4486
4487 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4488 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4489 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4490 * result of the command is expected to be a flow also, which is decoded and
4491 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
4492 * is no longer needed ('reply' will contain pointers into '*bufp'). */
4493 static int
4494 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
4495 struct dpif_netlink_flow *reply,
4496 struct ofpbuf **bufp)
4497 {
4498 struct ofpbuf *request_buf;
4499 int error;
4500
4501 ovs_assert((reply != NULL) == (bufp != NULL));
4502
4503 if (reply) {
4504 request->nlmsg_flags |= NLM_F_ECHO;
4505 }
4506
4507 request_buf = ofpbuf_new(1024);
4508 dpif_netlink_flow_to_ofpbuf(request, request_buf);
4509 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4510 ofpbuf_delete(request_buf);
4511
4512 if (reply) {
4513 if (!error) {
4514 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
4515 }
4516 if (error) {
4517 dpif_netlink_flow_init(reply);
4518 ofpbuf_delete(*bufp);
4519 *bufp = NULL;
4520 }
4521 }
4522 return error;
4523 }
4524
4525 static void
4526 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
4527 struct dpif_flow_stats *stats)
4528 {
4529 if (flow->stats) {
4530 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
4531 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
4532 } else {
4533 stats->n_packets = 0;
4534 stats->n_bytes = 0;
4535 }
4536 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
4537 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
4538 }
4539
4540 /* Logs information about a packet that was recently lost in 'ch' (in
4541 * 'dpif_'). */
4542 static void
4543 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
4544 uint32_t handler_id)
4545 {
4546 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
4547 struct ds s;
4548
4549 if (VLOG_DROP_WARN(&rl)) {
4550 return;
4551 }
4552
4553 ds_init(&s);
4554 if (ch->last_poll != LLONG_MIN) {
4555 ds_put_format(&s, " (last polled %lld ms ago)",
4556 time_msec() - ch->last_poll);
4557 }
4558
4559 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
4560 dpif_name(&dpif->dpif), ch_idx, handler_id);
4561 ds_destroy(&s);
4562 }