]> git.proxmox.com Git - ovs.git/blob - lib/dpif-netlink.c
ofproto-dpif-upcall: Echo HASH attribute back to datapath.
[ovs.git] / lib / dpif-netlink.c
1 /*
2 * Copyright (c) 2008-2018 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dpif-netlink.h"
20
21 #include <ctype.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <net/if.h>
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
28 #include <poll.h>
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <sys/epoll.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34
35 #include "bitmap.h"
36 #include "dpif-netlink-rtnl.h"
37 #include "dpif-provider.h"
38 #include "fat-rwlock.h"
39 #include "flow.h"
40 #include "netdev-linux.h"
41 #include "netdev-offload.h"
42 #include "netdev-provider.h"
43 #include "netdev-vport.h"
44 #include "netdev.h"
45 #include "netlink-conntrack.h"
46 #include "netlink-notifier.h"
47 #include "netlink-socket.h"
48 #include "netlink.h"
49 #include "netnsid.h"
50 #include "odp-util.h"
51 #include "openvswitch/dynamic-string.h"
52 #include "openvswitch/flow.h"
53 #include "openvswitch/hmap.h"
54 #include "openvswitch/match.h"
55 #include "openvswitch/ofpbuf.h"
56 #include "openvswitch/poll-loop.h"
57 #include "openvswitch/shash.h"
58 #include "openvswitch/thread.h"
59 #include "openvswitch/vlog.h"
60 #include "packets.h"
61 #include "random.h"
62 #include "sset.h"
63 #include "timeval.h"
64 #include "unaligned.h"
65 #include "util.h"
66
67 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
68 #ifdef _WIN32
69 #include "wmi.h"
70 enum { WINDOWS = 1 };
71 #else
72 enum { WINDOWS = 0 };
73 #endif
74 enum { MAX_PORTS = USHRT_MAX };
75
76 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
77 * missing if we have old headers. */
78 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
79
80 #define FLOW_DUMP_MAX_BATCH 50
81 #define OPERATE_MAX_OPS 50
82
83 #ifndef EPOLLEXCLUSIVE
84 #define EPOLLEXCLUSIVE (1u << 28)
85 #endif
86
/* Userspace image of an OVS_DP_* Generic Netlink message: the command plus
 * the datapath attributes this module reads or writes.  Pointer members
 * reference data inside a parsed ofpbuf (see dpif_netlink_dp_from_ofpbuf())
 * and are NULL when the corresponding attribute is absent. */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
};
102
103 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
104 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
105 const struct ofpbuf *);
106 static void dpif_netlink_dp_dump_start(struct nl_dump *);
107 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
108 struct dpif_netlink_dp *reply,
109 struct ofpbuf **bufp);
110 static int dpif_netlink_dp_get(const struct dpif *,
111 struct dpif_netlink_dp *reply,
112 struct ofpbuf **bufp);
113
/* Userspace image of an OVS_FLOW_* Generic Netlink message: the command,
 * message flags, ovs_header fields, and the flow attributes this module
 * serializes or parses. */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
145
146 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
147 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
148 const struct ofpbuf *);
149 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
150 struct ofpbuf *);
151 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
152 struct dpif_netlink_flow *reply,
153 struct ofpbuf **bufp);
154 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
155 struct dpif_flow_stats *);
156 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
157 const struct dpif_netlink_flow *);
158
/* One of the dpif channels between the kernel and userspace.  There is one
 * channel per datapath port (see 'channels' in struct dpif_netlink). */
struct dpif_channel {
    struct nl_sock *sock;      /* Netlink socket. */
    long long int last_poll;   /* Last time this channel was polled;
                                * LLONG_MIN until first polled. */
};
164
165 #ifdef _WIN32
166 #define VPORT_SOCK_POOL_SIZE 1
167 /* On Windows, there is no native support for epoll. There are equivalent
168 * interfaces though, that are not used currently. For simpicity, a pool of
169 * netlink sockets is used. Each socket is represented by 'struct
170 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
171 * sharing the same socket. In the future, we can add a reference count and
172 * such fields. */
173 struct dpif_windows_vport_sock {
174 struct nl_sock *nl_sock; /* netlink socket. */
175 };
176 #endif
177
/* Per-upcall-handler-thread state: the epoll instance watching all port
 * channels, plus the batch of events most recently returned by
 * epoll_wait(). */
struct dpif_handler {
    struct epoll_event *epoll_events;  /* Buffer for epoll_wait() results;
                                        * sized to 'uc_array_size' entries. */
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
191
/* Datapath interface for the openvswitch Linux kernel module.  Embeds the
 * generic 'struct dpif'; recover it with dpif_netlink_cast(). */
struct dpif_netlink {
    struct dpif dpif;
    int dp_ifindex;              /* Kernel datapath ifindex, used in every
                                  * ovs_header we send. */

    /* Upcall messages.  'upcall_lock' guards all of the fields below. */
    struct fat_rwlock upcall_lock;
    struct dpif_handler *handlers;
    uint32_t n_handlers;           /* Num of upcall handlers. */
    struct dpif_channel *channels; /* Array of channels for each port,
                                    * indexed by port number. */
    int uc_array_size;             /* Size of 'handler->channels' and */
                                   /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier; /* vport multicast group subscriber. */
    bool refresh_channels;         /* Request to rebuild channels on the
                                    * next dpif_netlink_run(). */
};
209
210 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
211 uint32_t ch_idx, uint32_t handler_id);
212
213 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
214
215 /* Generic Netlink family numbers for OVS.
216 *
217 * Initialized by dpif_netlink_init(). */
218 static int ovs_datapath_family;
219 static int ovs_vport_family;
220 static int ovs_flow_family;
221 static int ovs_packet_family;
222 static int ovs_meter_family;
223 static int ovs_ct_limit_family;
224
225 /* Generic Netlink multicast groups for OVS.
226 *
227 * Initialized by dpif_netlink_init(). */
228 static unsigned int ovs_vport_mcgroup;
229
230 /* If true, tunnel devices are created using OVS compat/genetlink.
231 * If false, tunnel devices are created with rtnetlink and using light weight
232 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
233 * to using the compat interface. */
234 static bool ovs_tunnels_out_of_tree = true;
235
236 static int dpif_netlink_init(void);
237 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
238 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
239 odp_port_t port_no);
240 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
241 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
242 uint32_t n_handlers);
243 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
244 struct ofpbuf *);
245 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
246 const struct ofpbuf *);
247 static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
248 odp_port_t port_no, const char *port_name,
249 struct dpif_port *dpif_port);
250
/* Obtains a Netlink socket for upcalls and stores it in '*sockp'.
 *
 * On Linux, creates a fresh NETLINK_GENERIC socket.  On Windows, hands out
 * a socket from handler 0's preallocated pool in round-robin order (several
 * ports may therefore share one socket); returns EINVAL if the pool has not
 * been allocated yet.  Returns 0 on success, a positive errno on failure. */
static int
create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **sockp)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
#ifndef _WIN32
    return nl_sock_create(NETLINK_GENERIC, sockp);
#else
    /* Pick netlink sockets to use in a round-robin fashion from each
     * handler's pool of sockets. */
    struct dpif_handler *handler = &dpif->handlers[0];
    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
    size_t index = handler->last_used_pool_idx;

    /* A pool of sockets is allocated when the handler is initialized. */
    if (sock_pool == NULL) {
        *sockp = NULL;
        return EINVAL;
    }

    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
    *sockp = sock_pool[index].nl_sock;
    ovs_assert(*sockp);
    /* Advance the round-robin cursor, wrapping at the end of the pool. */
    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
    handler->last_used_pool_idx = index;
    return 0;
#endif
}
278
/* Releases a socket obtained from create_nl_sock().  On Windows this is a
 * no-op because the socket belongs to a handler's shared pool and is torn
 * down with the pool, not per-port. */
static void
close_nl_sock(struct nl_sock *sock)
{
#ifndef _WIN32
    nl_sock_destroy(sock);
#endif
}
286
/* Returns the 'struct dpif_netlink' that embeds 'dpif', asserting that
 * 'dpif' really belongs to the netlink dpif class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
293
/* Adds the name of every kernel datapath to 'all_dps' via a Netlink dump.
 * Implements the dpif_class 'enumerate' callback.  Returns 0 on success or
 * a positive errno value from initialization or from the dump itself. */
static int
dpif_netlink_enumerate(struct sset *all_dps,
                       const struct dpif_class *dpif_class OVS_UNUSED)
{
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf msg, buf;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_dp_dump_start(&dump);
    while (nl_dump_next(&dump, &msg, &buf)) {
        struct dpif_netlink_dp dp;

        /* Skip replies that fail to parse rather than aborting the dump. */
        if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
            sset_add(all_dps, dp.name);
        }
    }
    ofpbuf_uninit(&buf);
    return nl_dump_done(&dump);
}
320
/* Opens datapath 'name', creating it first if 'create' is true, and on
 * success stores a dpif for it in '*dpifp'.  Implements the dpif_class
 * 'open' callback.
 *
 * When creating, the initial upcall PID is 0, which leaves upcalls disabled
 * until channels are configured.  When opening an existing datapath,
 * OVS_DP_CMD_SET is used so that the user-feature flags below are still
 * reported to the kernel.  Returns 0 on success or a positive errno. */
static int
dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
                  bool create, struct dpif **dpifp)
{
    struct dpif_netlink_dp dp_request, dp;
    struct ofpbuf *buf;
    uint32_t upcall_pid;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    /* Create or look up datapath. */
    dpif_netlink_dp_init(&dp_request);
    if (create) {
        dp_request.cmd = OVS_DP_CMD_NEW;
        upcall_pid = 0;
        dp_request.upcall_pid = &upcall_pid;
    } else {
        /* Use OVS_DP_CMD_SET to report user features */
        dp_request.cmd = OVS_DP_CMD_SET;
    }
    dp_request.name = name;
    dp_request.user_features |= OVS_DP_F_UNALIGNED;
    dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
    error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
    if (error) {
        return error;
    }

    error = open_dpif(&dp, dpifp);
    ofpbuf_delete(buf);
    return error;
}
357
358 static int
359 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
360 {
361 struct dpif_netlink *dpif;
362
363 dpif = xzalloc(sizeof *dpif);
364 dpif->port_notifier = NULL;
365 fat_rwlock_init(&dpif->upcall_lock);
366
367 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
368 dp->dp_ifindex, dp->dp_ifindex);
369
370 dpif->dp_ifindex = dp->dp_ifindex;
371 *dpifp = &dpif->dpif;
372
373 return 0;
374 }
375
376 #ifdef _WIN32
/* Destroys every socket in 'handler''s pool (unsubscribing each from packet
 * reception first) and frees the pool itself.  No-op when the pool was
 * never allocated, so it is safe to call from error paths. */
static void
vport_delete_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (handler->vport_sock_pool) {
        uint32_t i;
        struct dpif_windows_vport_sock *sock_pool =
            handler->vport_sock_pool;

        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
            if (sock_pool[i].nl_sock) {
                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
                nl_sock_destroy(sock_pool[i].nl_sock);
                sock_pool[i].nl_sock = NULL;
            }
        }

        free(handler->vport_sock_pool);
        handler->vport_sock_pool = NULL;
    }
}
398
399 static int
400 vport_create_sock_pool(struct dpif_handler *handler)
401 OVS_REQ_WRLOCK(dpif->upcall_lock)
402 {
403 struct dpif_windows_vport_sock *sock_pool;
404 size_t i;
405 int error = 0;
406
407 sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
408 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
409 error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
410 if (error) {
411 goto error;
412 }
413
414 /* Enable the netlink socket to receive packets. This is equivalent to
415 * calling nl_sock_join_mcgroup() to receive events. */
416 error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
417 if (error) {
418 goto error;
419 }
420 }
421
422 handler->vport_sock_pool = sock_pool;
423 handler->last_used_pool_idx = 0;
424 return 0;
425
426 error:
427 vport_delete_sock_pool(handler);
428 return error;
429 }
430 #endif /* _WIN32 */
431
/* Given the port number 'port_idx', extracts the pid of netlink socket
 * associated to the port and assigns it to 'upcall_pid'.  Returns false
 * (leaving '*upcall_pid' untouched) when the port has no channel socket. */
static bool
vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx,
              uint32_t *upcall_pid)
{
    /* Since the nl_sock can only be assigned in either all
     * or none "dpif" channels, the following check
     * would suffice. */
    if (!dpif->channels[port_idx].sock) {
        return false;
    }
    /* On Windows at most one handler is supported. */
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    *upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock);

    return true;
}
450
/* Registers 'sock' as the upcall channel for port 'port_no': grows the
 * per-port 'channels' array and every handler's epoll event buffer if
 * needed, then adds the socket to each handler's epoll set.
 *
 * Takes ownership of 'sock' on success (and when there are no handlers, in
 * which case the socket is simply closed).  On epoll failure, removes the
 * socket from any epoll sets it was already added to and returns the errno
 * value; the caller retains ownership of 'sock' in that case.  Returns
 * EFBIG if 'port_no' exceeds MAX_PORTS. */
static int
vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
                  struct nl_sock *sock)
{
    struct epoll_event event;
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;
    int error;

    if (dpif->handlers == NULL) {
        close_nl_sock(sock);
        return 0;
    }

    /* We assume that the datapath densely chooses port numbers, which can
     * therefore be used as an index into 'channels' and 'epoll_events' of
     * 'dpif'. */
    if (port_idx >= dpif->uc_array_size) {
        uint32_t new_size = port_idx + 1;

        if (new_size > MAX_PORTS) {
            VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
                         dpif_name(&dpif->dpif), port_no);
            return EFBIG;
        }

        dpif->channels = xrealloc(dpif->channels,
                                  new_size * sizeof *dpif->channels);

        /* Mark the newly added slots as having no socket yet. */
        for (i = dpif->uc_array_size; i < new_size; i++) {
            dpif->channels[i].sock = NULL;
        }

        for (i = 0; i < dpif->n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->epoll_events = xrealloc(handler->epoll_events,
                new_size * sizeof *handler->epoll_events);

        }
        dpif->uc_array_size = new_size;
    }

    memset(&event, 0, sizeof event);
    /* EPOLLEXCLUSIVE wakes only one handler per event instead of all of
     * them (avoids thundering herd on kernels that support it). */
    event.events = EPOLLIN | EPOLLEXCLUSIVE;
    event.data.u32 = port_idx;

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

#ifndef _WIN32
        if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(sock),
                      &event) < 0) {
            error = errno;
            goto error;
        }
#endif
    }
    dpif->channels[port_idx].sock = sock;
    dpif->channels[port_idx].last_poll = LLONG_MIN;

    return 0;

error:
#ifndef _WIN32
    /* Unwind: remove the socket from the handlers already updated. */
    while (i--) {
        epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(sock), NULL);
    }
#endif
    dpif->channels[port_idx].sock = NULL;

    return error;
}
525
/* Tears down the upcall channel for port 'port_no': removes its socket from
 * every handler's epoll set, resets each handler's pending event batch, and
 * destroys the socket.  No-op if the port has no channel. */
static void
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
{
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;

    if (!dpif->handlers || port_idx >= dpif->uc_array_size
        || !dpif->channels[port_idx].sock) {
        return;
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
        epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(dpif->channels[port_idx].sock), NULL);
#endif
        /* Drop any batched events; they may reference this port. */
        handler->event_offset = handler->n_events = 0;
    }
#ifndef _WIN32
    nl_sock_destroy(dpif->channels[port_idx].sock);
#endif
    dpif->channels[port_idx].sock = NULL;
}
550
/* Destroys every upcall channel and all handler state: first asks the
 * kernel to stop sending upcalls for each port (upcall PID 0), then deletes
 * the channels, and finally frees the handlers and channel array. */
static void
destroy_all_channels(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned int i;

    if (!dpif->handlers) {
        return;
    }

    for (i = 0; i < dpif->uc_array_size; i++ ) {
        struct dpif_netlink_vport vport_request;
        uint32_t upcall_pids = 0;

        if (!dpif->channels[i].sock) {
            continue;
        }

        /* Turn off upcalls. */
        dpif_netlink_vport_init(&vport_request);
        vport_request.cmd = OVS_VPORT_CMD_SET;
        vport_request.dp_ifindex = dpif->dp_ifindex;
        vport_request.port_no = u32_to_odp(i);
        vport_request.n_upcall_pids = 1;
        vport_request.upcall_pids = &upcall_pids;
        /* Best effort: failure to tell the kernel is not fatal here. */
        dpif_netlink_vport_transact(&vport_request, NULL, NULL);

        vport_del_channels(dpif, u32_to_odp(i));
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        dpif_netlink_handler_uninit(handler);
        free(handler->epoll_events);
    }
    free(dpif->channels);
    free(dpif->handlers);
    dpif->handlers = NULL;
    dpif->channels = NULL;
    dpif->n_handlers = 0;
    dpif->uc_array_size = 0;
}
594
/* Closes 'dpif_' and frees all associated resources.  Implements the
 * dpif_class 'close' callback.  Channels are destroyed under the write
 * lock, which must be released before the lock itself is destroyed. */
static void
dpif_netlink_close(struct dpif *dpif_)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    nl_sock_destroy(dpif->port_notifier);

    fat_rwlock_wrlock(&dpif->upcall_lock);
    destroy_all_channels(dpif);
    fat_rwlock_unlock(&dpif->upcall_lock);

    fat_rwlock_destroy(&dpif->upcall_lock);
    free(dpif);
}
609
610 static int
611 dpif_netlink_destroy(struct dpif *dpif_)
612 {
613 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
614 struct dpif_netlink_dp dp;
615
616 dpif_netlink_dp_init(&dp);
617 dp.cmd = OVS_DP_CMD_DEL;
618 dp.dp_ifindex = dpif->dp_ifindex;
619 return dpif_netlink_dp_transact(&dp, NULL, NULL);
620 }
621
622 static bool
623 dpif_netlink_run(struct dpif *dpif_)
624 {
625 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
626
627 if (dpif->refresh_channels) {
628 dpif->refresh_channels = false;
629 fat_rwlock_wrlock(&dpif->upcall_lock);
630 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
631 fat_rwlock_unlock(&dpif->upcall_lock);
632 }
633 return false;
634 }
635
636 static int
637 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
638 {
639 struct dpif_netlink_dp dp;
640 struct ofpbuf *buf;
641 int error;
642
643 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
644 if (!error) {
645 memset(stats, 0, sizeof *stats);
646
647 if (dp.stats) {
648 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
649 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
650 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
651 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
652 }
653
654 if (dp.megaflow_stats) {
655 stats->n_masks = dp.megaflow_stats->n_masks;
656 stats->n_mask_hit = get_32aligned_u64(
657 &dp.megaflow_stats->n_mask_hit);
658 } else {
659 stats->n_masks = UINT32_MAX;
660 stats->n_mask_hit = UINT64_MAX;
661 }
662 ofpbuf_delete(buf);
663 }
664 return error;
665 }
666
/* Returns the netdev type string corresponding to 'vport''s kernel vport
 * type, e.g. "internal", "vxlan", "gre".  For OVS_VPORT_TYPE_NETDEV the
 * name is looked up so that e.g. tap devices report their real type,
 * defaulting to "system".  Unknown types log a rate-limited warning and
 * return "unknown". */
static const char *
get_vport_type(const struct dpif_netlink_vport *vport)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

    switch (vport->type) {
    case OVS_VPORT_TYPE_NETDEV: {
        const char *type = netdev_get_type_from_name(vport->name);

        return type ? type : "system";
    }

    case OVS_VPORT_TYPE_INTERNAL:
        return "internal";

    case OVS_VPORT_TYPE_GENEVE:
        return "geneve";

    case OVS_VPORT_TYPE_GRE:
        return "gre";

    case OVS_VPORT_TYPE_VXLAN:
        return "vxlan";

    case OVS_VPORT_TYPE_LISP:
        return "lisp";

    case OVS_VPORT_TYPE_STT:
        return "stt";

    case OVS_VPORT_TYPE_ERSPAN:
        return "erspan";

    case OVS_VPORT_TYPE_IP6ERSPAN:
        return "ip6erspan";

    case OVS_VPORT_TYPE_IP6GRE:
        return "ip6gre";

    case OVS_VPORT_TYPE_UNSPEC:
    case __OVS_VPORT_TYPE_MAX:
        break;
    }

    VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
                 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
    return "unknown";
}
715
716 enum ovs_vport_type
717 netdev_to_ovs_vport_type(const char *type)
718 {
719 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
720 return OVS_VPORT_TYPE_NETDEV;
721 } else if (!strcmp(type, "internal")) {
722 return OVS_VPORT_TYPE_INTERNAL;
723 } else if (strstr(type, "stt")) {
724 return OVS_VPORT_TYPE_STT;
725 } else if (!strcmp(type, "geneve")) {
726 return OVS_VPORT_TYPE_GENEVE;
727 } else if (!strcmp(type, "vxlan")) {
728 return OVS_VPORT_TYPE_VXLAN;
729 } else if (!strcmp(type, "lisp")) {
730 return OVS_VPORT_TYPE_LISP;
731 } else if (!strcmp(type, "erspan")) {
732 return OVS_VPORT_TYPE_ERSPAN;
733 } else if (!strcmp(type, "ip6erspan")) {
734 return OVS_VPORT_TYPE_IP6ERSPAN;
735 } else if (!strcmp(type, "ip6gre")) {
736 return OVS_VPORT_TYPE_IP6GRE;
737 } else if (!strcmp(type, "gre")) {
738 return OVS_VPORT_TYPE_GRE;
739 } else {
740 return OVS_VPORT_TYPE_UNSPEC;
741 }
742 }
743
/* Adds a vport named 'name' of kernel type 'type' to 'dpif''s datapath,
 * with optional OVS_VPORT_ATTR_OPTIONS taken from 'options' (may be NULL).
 *
 * '*port_nop' is the requested port number on entry (ODPP_NONE to let the
 * kernel choose) and the assigned port number on successful return.  When
 * upcall handlers exist, a per-port Netlink socket is created and wired up
 * as the port's upcall channel; if that wiring fails, the just-created
 * vport is deleted again.  Returns 0 on success or a positive errno. */
static int
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                        enum ovs_vport_type type,
                        struct ofpbuf *options,
                        odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport request, reply;
    struct ofpbuf *buf;
    struct nl_sock *sock = NULL;
    uint32_t upcall_pids = 0;
    int error = 0;

    if (dpif->handlers) {
        error = create_nl_sock(dpif, &sock);
        if (error) {
            return error;
        }
    }

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_NEW;
    request.dp_ifindex = dpif->dp_ifindex;
    request.type = type;
    request.name = name;

    request.port_no = *port_nop;
    /* PID 0 (no socket) tells the kernel not to send upcalls. */
    if (sock) {
        upcall_pids = nl_sock_pid(sock);
    }
    request.n_upcall_pids = 1;
    request.upcall_pids = &upcall_pids;

    if (options) {
        request.options = options->data;
        request.options_len = options->size;
    }

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        *port_nop = reply.port_no;
    } else {
        if (error == EBUSY && *port_nop != ODPP_NONE) {
            VLOG_INFO("%s: requested port %"PRIu32" is in use",
                      dpif_name(&dpif->dpif), *port_nop);
        }

        close_nl_sock(sock);
        goto exit;
    }

    error = vport_add_channel(dpif, *port_nop, sock);
    if (error) {
        VLOG_INFO("%s: could not add channel for port %s",
                  dpif_name(&dpif->dpif), name);

        /* Delete the port. */
        dpif_netlink_vport_init(&request);
        request.cmd = OVS_VPORT_CMD_DEL;
        request.dp_ifindex = dpif->dp_ifindex;
        request.port_no = *port_nop;
        dpif_netlink_vport_transact(&request, NULL, NULL);
        close_nl_sock(sock);
        goto exit;
    }

exit:
    ofpbuf_delete(buf);

    return error;
}
815
/* Adds 'netdev' to 'dpif' using the OVS compat (genetlink) vport interface.
 *
 * Translates the netdev type to a kernel vport type, applies per-type
 * preparation (disabling LRO for system devices on Linux, creating a WMI
 * port for internal ports on Windows), serializes any tunnel configuration
 * into OVS_TUNNEL_ATTR_* options, and delegates to
 * dpif_netlink_port_add__().  Returns 0 on success or a positive errno. */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX : Map appropiate Windows handle */
#else
        /* LRO interferes with forwarding; best effort, failure ignored. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            /* 'exts' is a bitmap; emit one flag attribute per set bit. */
            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (1 << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
882
/* Creates a tunnel device for 'netdev' via rtnetlink and then adds it to
 * 'dpif' as an OVS_VPORT_TYPE_NETDEV vport.  If the vport add fails, the
 * rtnetlink device is destroyed again.  Returns 0 on success, EOPNOTSUPP if
 * rtnetlink creation is unsupported for this type (the caller falls back to
 * the compat interface), or another positive errno. */
static int
dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
                                      struct netdev *netdev,
                                      odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *name;
    int error;

    error = dpif_netlink_rtnl_port_create(netdev);
    if (error) {
        /* EOPNOTSUPP is the expected fallback path; do not warn for it. */
        if (error != EOPNOTSUPP) {
            VLOG_WARN_RL(&rl, "Failed to create %s with rtnetlink: %s",
                         netdev_get_name(netdev), ovs_strerror(error));
        }
        return error;
    }

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
                                    port_nop);
    if (error) {
        dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
    }
    return error;
}
911
912 static int
913 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
914 odp_port_t *port_nop)
915 {
916 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
917 int error = EOPNOTSUPP;
918
919 fat_rwlock_wrlock(&dpif->upcall_lock);
920 if (!ovs_tunnels_out_of_tree) {
921 error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop);
922 }
923 if (error) {
924 error = dpif_netlink_port_add_compat(dpif, netdev, port_nop);
925 }
926 fat_rwlock_unlock(&dpif->upcall_lock);
927
928 return error;
929 }
930
/* Deletes port 'port_no' from 'dpif': queries the port first (so its name
 * and type are known for cleanup), asks the kernel to remove the vport,
 * tears down its upcall channel, and destroys any rtnetlink tunnel device
 * backing it.  Returns 0 on success or a positive errno. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* Not an rtnetlink-managed device; nothing to destroy. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
971
972 static int
973 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
974 {
975 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
976 int error;
977
978 fat_rwlock_wrlock(&dpif->upcall_lock);
979 error = dpif_netlink_port_del__(dpif, port_no);
980 fat_rwlock_unlock(&dpif->upcall_lock);
981
982 return error;
983 }
984
/* Queries a port of 'dpif' either by number ('port_no') or by name
 * ('port_name'; pass NULL to query by number).  On success, if 'dpif_port'
 * is nonnull, fills it with malloc'd name/type strings that the caller must
 * free with dpif_port_destroy().  Returns ENODEV if a by-name query matched
 * a port in a different datapath, otherwise 0 or a positive errno. */
static int
dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
                          const char *port_name, struct dpif_port *dpif_port)
{
    struct dpif_netlink_vport request;
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;
    request.port_no = port_no;
    request.name = port_name;

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        if (reply.dp_ifindex != request.dp_ifindex) {
            /* A query by name reported that 'port_name' is in some datapath
             * other than 'dpif', but the caller wants to know about 'dpif'. */
            error = ENODEV;
        } else if (dpif_port) {
            dpif_port->name = xstrdup(reply.name);
            dpif_port->type = xstrdup(get_vport_type(&reply));
            dpif_port->port_no = reply.port_no;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
1015
/* Implements the dpif_class 'port_query_by_number' callback: looks up port
 * 'port_no' and fills '*dpif_port' on success. */
static int
dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
                                  struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
}
1024
/* Implements the dpif_class 'port_query_by_name' callback: looks up the
 * port named 'devname' and fills '*dpif_port' on success. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
}
1033
/* Returns the Netlink PID that the kernel should use for upcalls on
 * 'port_no', or 0 if upcalls are not set up (no handlers, or the port's
 * channel socket has gone away). */
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
                            odp_port_t port_no)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    uint32_t port_idx = odp_to_u32(port_no);
    uint32_t pid = 0;

    if (dpif->handlers && dpif->uc_array_size > 0) {
        /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
         * channel, since it is not heavily loaded. */
        uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;

        /* Needs to check in case the socket pointer is changed in between
         * the holding of upcall_lock. A known case happens when the main
         * thread deletes the vport while the handler thread is handling
         * the upcall from that port. */
        if (dpif->channels[idx].sock) {
            pid = nl_sock_pid(dpif->channels[idx].sock);
        }
    }

    return pid;
}
1058
1059 static uint32_t
1060 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
1061 {
1062 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1063 uint32_t ret;
1064
1065 fat_rwlock_rdlock(&dpif->upcall_lock);
1066 ret = dpif_netlink_port_get_pid__(dpif, port_no);
1067 fat_rwlock_unlock(&dpif->upcall_lock);
1068
1069 return ret;
1070 }
1071
1072 static int
1073 dpif_netlink_flow_flush(struct dpif *dpif_)
1074 {
1075 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1076 struct dpif_netlink_flow flow;
1077
1078 dpif_netlink_flow_init(&flow);
1079 flow.cmd = OVS_FLOW_CMD_DEL;
1080 flow.dp_ifindex = dpif->dp_ifindex;
1081
1082 if (netdev_is_flow_api_enabled()) {
1083 netdev_ports_flow_flush(dpif_->dpif_class);
1084 }
1085
1086 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1087 }
1088
/* Iterator state for a port dump, allocated by
 * dpif_netlink_port_dump_start() and freed by the dump-done callback. */
struct dpif_netlink_port_state {
    struct nl_dump dump;  /* Ongoing OVS_VPORT_CMD_GET Netlink dump. */
    struct ofpbuf buf;    /* Reply buffer reused across _next() calls. */
};
1093
/* Starts a Netlink dump of all vports in 'dpif''s datapath, initializing
 * '*dump' for use with dpif_netlink_port_dump_next__(). */
static void
dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
                               struct nl_dump *dump)
{
    struct dpif_netlink_vport request;
    struct ofpbuf *buf;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;

    buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(&request, buf);
    nl_dump_start(dump, NETLINK_GENERIC, buf);
    ofpbuf_delete(buf);
}
1110
1111 static int
1112 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1113 {
1114 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1115 struct dpif_netlink_port_state *state;
1116
1117 *statep = state = xmalloc(sizeof *state);
1118 dpif_netlink_port_dump_start__(dpif, &state->dump);
1119
1120 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
1121 return 0;
1122 }
1123
/* Fetches the next vport record from 'dump' into 'vport'.  'buffer' provides
 * the storage that 'vport''s attribute pointers refer into, so it must stay
 * alive as long as 'vport' is in use.
 *
 * Returns 0 on success, EOF when the dump is exhausted, or a positive errno
 * value if a record cannot be parsed (also logged, rate-limited). */
static int
dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
                              struct nl_dump *dump,
                              struct dpif_netlink_vport *vport,
                              struct ofpbuf *buffer)
{
    struct ofpbuf buf;
    int error;

    if (!nl_dump_next(dump, &buf, buffer)) {
        return EOF;
    }

    error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
    if (error) {
        VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
                     dpif_name(&dpif->dpif), ovs_strerror(error));
    }
    return error;
}
1144
/* dpif 'port_dump_next' callback: translates the next dumped vport into
 * 'dpif_port'.  Returns 0 on success, EOF at the end of the dump, or a
 * positive errno value on parse failure. */
static int
dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
                            struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_port_state *state = state_;
    struct dpif_netlink_vport vport;
    int error;

    error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
                                          &state->buf);
    if (error) {
        return error;
    }
    /* 'vport' points into 'state->buf', which stays valid until the next
     * call, so handing out non-owning pointers here is safe. */
    dpif_port->name = CONST_CAST(char *, vport.name);
    dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
    dpif_port->port_no = vport.port_no;
    return 0;
}
1164
1165 static int
1166 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1167 {
1168 struct dpif_netlink_port_state *state = state_;
1169 int error = nl_dump_done(&state->dump);
1170
1171 ofpbuf_uninit(&state->buf);
1172 free(state);
1173 return error;
1174 }
1175
/* dpif 'port_poll' callback.  Reports the name of a port added to, deleted
 * from, or reconfigured in 'dpif_', storing a malloc()'d copy in '*devnamep'.
 * Returns 0 when a change is reported, EAGAIN if nothing is pending, ENOBUFS
 * if notifications may have been lost (the caller should then re-examine all
 * ports), or another positive errno value on failure. */
static int
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, NULL, false);
        if (!error) {
            struct dpif_netlink_vport vport;

            error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                /* Ignore notifications for other datapaths and for commands
                 * that do not affect port configuration. */
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        /* A deleted vport leaves its upcall channel behind;
                         * request a rebuild of the channel array. */
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            /* Receive failure (e.g. socket buffer overrun): drain the socket
             * and tell the caller that anything might have changed. */
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
1243
1244 static void
1245 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1246 {
1247 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1248
1249 if (dpif->port_notifier) {
1250 nl_sock_wait(dpif->port_notifier, POLLIN);
1251 } else {
1252 poll_immediate_wake();
1253 }
1254 }
1255
1256 static void
1257 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1258 const ovs_u128 *ufid, bool terse)
1259 {
1260 if (ufid) {
1261 request->ufid = *ufid;
1262 request->ufid_present = true;
1263 } else {
1264 request->ufid_present = false;
1265 }
1266 request->ufid_terse = terse;
1267 }
1268
/* Initializes 'request' as an OVS_FLOW_CMD_GET request for the flow
 * identified by 'key'/'key_len' and/or 'ufid' in 'dpif'.  'request' borrows
 * the 'key' pointer, which must remain valid while 'request' is in use. */
static void
dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
                             const struct nlattr *key, size_t key_len,
                             const ovs_u128 *ufid, bool terse,
                             struct dpif_netlink_flow *request)
{
    dpif_netlink_flow_init(request);
    request->cmd = OVS_FLOW_CMD_GET;
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = key;
    request->key_len = key_len;
    dpif_netlink_flow_init_ufid(request, ufid, terse);
}
1282
/* Initializes 'request' as a non-terse OVS_FLOW_CMD_GET request built from
 * the fields of the dpif-layer 'get' operation. */
static void
dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
                           const struct dpif_flow_get *get,
                           struct dpif_netlink_flow *request)
{
    dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
                                 false, request);
}
1291
/* Fetches one flow from the kernel datapath, identified by 'key'/'key_len'
 * and/or 'ufid'.  On success stores the parsed flow in 'reply', which points
 * into '*bufp'; the caller owns '*bufp' and must free it.  Returns 0 on
 * success or a positive errno value. */
static int
dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
                        const struct nlattr *key, size_t key_len,
                        const ovs_u128 *ufid, bool terse,
                        struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
{
    struct dpif_netlink_flow request;

    dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
    return dpif_netlink_flow_transact(&request, reply, bufp);
}
1303
/* Re-fetches 'flow' from the kernel (non-terse, so actions are included).
 * See dpif_netlink_flow_get__() for the 'reply'/'bufp' ownership contract. */
static int
dpif_netlink_flow_get(const struct dpif_netlink *dpif,
                      const struct dpif_netlink_flow *flow,
                      struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
{
    return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
                                   flow->ufid_present ? &flow->ufid : NULL,
                                   false, reply, bufp);
}
1313
/* Initializes 'request' as an OVS_FLOW_CMD_NEW or OVS_FLOW_CMD_SET request
 * (depending on whether 'put' requests creation) for 'dpif'.  'request'
 * borrows pointers from 'put', which must remain valid while 'request' is in
 * use. */
static void
dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
                           const struct dpif_flow_put *put,
                           struct dpif_netlink_flow *request)
{
    /* Zero-length placeholder so that an empty action list still results in
     * an OVS_FLOW_ATTR_ACTIONS attribute being serialized below. */
    static const struct nlattr dummy_action;

    dpif_netlink_flow_init(request);
    request->cmd = (put->flags & DPIF_FP_CREATE
                    ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = put->key;
    request->key_len = put->key_len;
    request->mask = put->mask;
    request->mask_len = put->mask_len;
    dpif_netlink_flow_init_ufid(request, put->ufid, false);

    /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
    request->actions = (put->actions
                        ? put->actions
                        : CONST_CAST(struct nlattr *, &dummy_action));
    request->actions_len = put->actions_len;
    if (put->flags & DPIF_FP_ZERO_STATS) {
        request->clear = true;
    }
    if (put->flags & DPIF_FP_PROBE) {
        request->probe = true;
    }
    /* For pure modification, omit NLM_F_CREATE so that the kernel does not
     * create a missing flow behind our back. */
    request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
}
1344
/* Initializes 'request' as an OVS_FLOW_CMD_DEL request for the flow
 * identified by 'key'/'key_len' and/or 'ufid' in 'dpif'.  'request' borrows
 * the 'key' pointer, which must remain valid while 'request' is in use. */
static void
dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
                             const struct nlattr *key, size_t key_len,
                             const ovs_u128 *ufid, bool terse,
                             struct dpif_netlink_flow *request)
{
    dpif_netlink_flow_init(request);
    request->cmd = OVS_FLOW_CMD_DEL;
    request->dp_ifindex = dpif->dp_ifindex;
    request->key = key;
    request->key_len = key_len;
    dpif_netlink_flow_init_ufid(request, ufid, terse);
}
1358
/* Initializes 'request' as an OVS_FLOW_CMD_DEL request built from the
 * fields of the dpif-layer 'del' operation. */
static void
dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
                           const struct dpif_flow_del *del,
                           struct dpif_netlink_flow *request)
{
    dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
                                 del->ufid, del->terse, request);
}
1367
/* A flow dump in progress, shared among all dumping threads.  Kernel flows
 * come from 'nl_dump'; offloaded flows come from the 'netdev_dumps' array. */
struct dpif_netlink_flow_dump {
    struct dpif_flow_dump up;        /* Common dpif flow-dump state. */
    struct nl_dump nl_dump;          /* Kernel datapath flow dump. */
    atomic_int status;               /* First error encountered, or 0. */
    struct netdev_flow_dump **netdev_dumps;
    int netdev_dumps_num;                    /* Number of netdev_flow_dumps */
    struct ovs_mutex netdev_lock;            /* Guards the following. */
    int netdev_current_dump OVS_GUARDED;     /* Shared current dump */
    struct dpif_flow_dump_types types;       /* Type of dump */
};
1378
/* Downcasts the generic 'dump' to its containing dpif-netlink flow dump. */
static struct dpif_netlink_flow_dump *
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
}
1384
/* Initializes the netdev (hardware offload) portion of 'dump': creates one
 * netdev flow dump per offload-capable port, unless netdev flows were not
 * requested, in which case the array is left empty. */
static void
start_netdev_dump(const struct dpif *dpif_,
                  struct dpif_netlink_flow_dump *dump)
{
    ovs_mutex_init(&dump->netdev_lock);

    if (!(dump->types.netdev_flows)) {
        dump->netdev_dumps_num = 0;
        dump->netdev_dumps = NULL;
        return;
    }

    ovs_mutex_lock(&dump->netdev_lock);
    dump->netdev_current_dump = 0;
    dump->netdev_dumps
        = netdev_ports_flow_dump_create(dpif_->dpif_class,
                                        &dump->netdev_dumps_num);
    ovs_mutex_unlock(&dump->netdev_lock);
}
1404
1405 static void
1406 dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump,
1407 struct dpif_flow_dump_types *types)
1408 {
1409 if (!types) {
1410 dump->types.ovs_flows = true;
1411 dump->types.netdev_flows = true;
1412 } else {
1413 memcpy(&dump->types, types, sizeof *types);
1414 }
1415 }
1416
/* dpif 'flow_dump_create' callback: allocates a new flow dump, starting the
 * kernel Netlink dump (if kernel flows were requested) and the per-port
 * netdev dumps (if offloaded flows were requested). */
static struct dpif_flow_dump *
dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
                              struct dpif_flow_dump_types *types)
{
    const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_flow_dump *dump;
    struct dpif_netlink_flow request;
    struct ofpbuf *buf;

    dump = xmalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);

    dpif_netlink_populate_flow_dump_types(dump, types);

    if (dump->types.ovs_flows) {
        dpif_netlink_flow_init(&request);
        request.cmd = OVS_FLOW_CMD_GET;
        request.dp_ifindex = dpif->dp_ifindex;
        request.ufid_present = false;
        request.ufid_terse = terse;

        buf = ofpbuf_new(1024);
        dpif_netlink_flow_to_ofpbuf(&request, buf);
        nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
        ofpbuf_delete(buf);
    }
    atomic_init(&dump->status, 0);
    dump->up.terse = terse;

    start_netdev_dump(dpif_, dump);

    return &dump->up;
}
1450
/* dpif 'flow_dump_destroy' callback: finishes the kernel dump (if one was
 * started), tears down all netdev dumps, and frees 'dump_'.  Returns the
 * first error recorded during the dump, or the Netlink dump status. */
static int
dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
    unsigned int nl_status = 0;
    int dump_status;

    if (dump->types.ovs_flows) {
        nl_status = nl_dump_done(&dump->nl_dump);
    }

    for (int i = 0; i < dump->netdev_dumps_num; i++) {
        int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);

        /* EOPNOTSUPP just means the port has no offload support. */
        if (err != 0 && err != EOPNOTSUPP) {
            VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
        }
    }

    free(dump->netdev_dumps);
    ovs_mutex_destroy(&dump->netdev_lock);

    /* No other thread has access to 'dump' at this point. */
    atomic_read_relaxed(&dump->status, &dump_status);
    free(dump);
    return dump_status ? dump_status : nl_status;
}
1478
/* Per-thread state for one thread participating in a shared flow dump. */
struct dpif_netlink_flow_dump_thread {
    struct dpif_flow_dump_thread up;   /* Common dpif per-thread state. */
    struct dpif_netlink_flow_dump *dump;   /* Shared dump this thread works on. */
    struct dpif_netlink_flow flow;
    struct dpif_flow_stats stats;
    struct ofpbuf nl_flows;     /* Always used to store flows. */
    struct ofpbuf *nl_actions;  /* Used if kernel does not supply actions. */
    int netdev_dump_idx;        /* This thread current netdev dump index */
    bool netdev_done;           /* If we are finished dumping netdevs */

    /* (Key/Mask/Actions) Buffers for netdev dumping */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
};
1494
/* Downcasts the generic 'thread' to its containing dpif-netlink dump
 * thread. */
static struct dpif_netlink_flow_dump_thread *
dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
}
1500
1501 static struct dpif_flow_dump_thread *
1502 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1503 {
1504 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1505 struct dpif_netlink_flow_dump_thread *thread;
1506
1507 thread = xmalloc(sizeof *thread);
1508 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1509 thread->dump = dump;
1510 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1511 thread->nl_actions = NULL;
1512 thread->netdev_dump_idx = 0;
1513 thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num);
1514
1515 return &thread->up;
1516 }
1517
/* dpif 'flow_dump_thread_destroy' callback: releases the buffers owned by
 * 'thread_' and frees it. */
static void
dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);

    ofpbuf_uninit(&thread->nl_flows);
    ofpbuf_delete(thread->nl_actions);
    free(thread);
}
1528
/* Converts 'datapath_flow' (a parsed kernel flow) into the dpif-layer
 * representation 'dpif_flow'.  The result borrows pointers from
 * 'datapath_flow', so the buffer backing it must outlive 'dpif_flow'.
 * If the kernel did not supply a UFID, one is synthesized by hashing the
 * flow key. */
static void
dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
                               const struct dpif_netlink_flow *datapath_flow)
{
    dpif_flow->key = datapath_flow->key;
    dpif_flow->key_len = datapath_flow->key_len;
    dpif_flow->mask = datapath_flow->mask;
    dpif_flow->mask_len = datapath_flow->mask_len;
    dpif_flow->actions = datapath_flow->actions;
    dpif_flow->actions_len = datapath_flow->actions_len;
    dpif_flow->ufid_present = datapath_flow->ufid_present;
    dpif_flow->pmd_id = PMD_ID_NULL;
    if (datapath_flow->ufid_present) {
        dpif_flow->ufid = datapath_flow->ufid;
    } else {
        ovs_assert(datapath_flow->key && datapath_flow->key_len);
        dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
                       &dpif_flow->ufid);
    }
    dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
    dpif_flow->attrs.offloaded = false;
    dpif_flow->attrs.dp_layer = "ovs";
}
1552
/* The design is such that all threads are working together on the first dump
 * to the last, in order (at first they all on dump 0).
 * When the first thread finds that the given dump is finished,
 * they all move to the next. If two or more threads find the same dump
 * is finished at the same time, the first one will advance the shared
 * netdev_current_dump and the others will catch up. */
static void
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
{
    struct dpif_netlink_flow_dump *dump = thread->dump;

    /* 'netdev_current_dump' is shared among threads, so all reads and
     * updates of it happen under 'netdev_lock'. */
    ovs_mutex_lock(&dump->netdev_lock);
    /* if we haven't finished (dumped everything) */
    if (dump->netdev_current_dump < dump->netdev_dumps_num) {
        /* if we are the first to find that current dump is finished
         * advance it. */
        if (thread->netdev_dump_idx == dump->netdev_current_dump) {
            thread->netdev_dump_idx = ++dump->netdev_current_dump;
            /* did we just finish the last dump? done. */
            if (dump->netdev_current_dump == dump->netdev_dumps_num) {
                thread->netdev_done = true;
            }
        } else {
            /* otherwise, we are behind, catch up */
            thread->netdev_dump_idx = dump->netdev_current_dump;
        }
    } else {
        /* some other thread finished */
        thread->netdev_done = true;
    }
    ovs_mutex_unlock(&dump->netdev_lock);
}
1585
/* Converts a netdev-offload flow ('match', 'actions', 'stats', 'attrs',
 * 'ufid') into the dpif-layer representation 'flow'.  The flow key and mask
 * are serialized into 'key_buf' and 'mask_buf', which must outlive 'flow'
 * since 'flow' points into them.  Always returns 0. */
static int
dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
                                       struct ofpbuf *key_buf,
                                       struct ofpbuf *mask_buf,
                                       struct nlattr *actions,
                                       struct dpif_flow_stats *stats,
                                       struct dpif_flow_attrs *attrs,
                                       ovs_u128 *ufid,
                                       struct dpif_flow *flow,
                                       bool terse OVS_UNUSED)
{

    struct odp_flow_key_parms odp_parms = {
        .flow = &match->flow,
        .mask = &match->wc.masks,
        .support = {
            .max_vlan_headers = 2,
        },
    };
    size_t offset;

    memset(flow, 0, sizeof *flow);

    /* Key */
    offset = key_buf->size;
    flow->key = ofpbuf_tail(key_buf);
    odp_flow_key_from_flow(&odp_parms, key_buf);
    flow->key_len = key_buf->size - offset;

    /* Mask */
    offset = mask_buf->size;
    flow->mask = ofpbuf_tail(mask_buf);
    odp_parms.key_buf = key_buf;
    odp_flow_key_from_mask(&odp_parms, mask_buf);
    flow->mask_len = mask_buf->size - offset;

    /* Actions */
    flow->actions = nl_attr_get(actions);
    flow->actions_len = nl_attr_get_size(actions);

    /* Stats */
    memcpy(&flow->stats, stats, sizeof *stats);

    /* UFID */
    flow->ufid_present = true;
    flow->ufid = *ufid;

    flow->pmd_id = PMD_ID_NULL;

    memcpy(&flow->attrs, attrs, sizeof *attrs);

    return 0;
}
1639
/* dpif 'flow_dump_next' callback: stores up to 'max_flows' flows in 'flows',
 * first draining this thread's share of the netdev (offloaded) dumps, then
 * the kernel Netlink dump.  Returns the number of flows stored; 0 means the
 * dump is complete.  The returned flows point into buffers owned by
 * 'thread_' and remain valid only until the next call. */
static int
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);
    struct dpif_netlink_flow_dump *dump = thread->dump;
    struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
    int n_flows;

    /* Release the actions buffer possibly left over from the previous
     * batch's "flow without actions" case. */
    ofpbuf_delete(thread->nl_actions);
    thread->nl_actions = NULL;

    n_flows = 0;
    max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

    /* Phase 1: netdev (hardware-offloaded) flows. */
    while (!thread->netdev_done && n_flows < max_flows) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
        struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
        struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
        struct ofpbuf key, mask, act;
        struct dpif_flow *f = &flows[n_flows];
        int cur = thread->netdev_dump_idx;
        struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
        struct match match;
        struct nlattr *actions;
        struct dpif_flow_stats stats;
        struct dpif_flow_attrs attrs;
        ovs_u128 ufid;
        bool has_next;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        has_next = netdev_flow_dump_next(netdev_dump, &match,
                                         &actions, &stats, &attrs,
                                         &ufid,
                                         &thread->nl_flows,
                                         &act);
        if (has_next) {
            dpif_netlink_netdev_match_to_dpif_flow(&match,
                                                   &key, &mask,
                                                   actions,
                                                   &stats,
                                                   &attrs,
                                                   &ufid,
                                                   f,
                                                   dump->up.terse);
            n_flows++;
        } else {
            /* Current netdev dump exhausted; move on (shared decision among
             * all dumping threads). */
            dpif_netlink_advance_netdev_dump(thread);
        }
    }

    if (!(dump->types.ovs_flows)) {
        return n_flows;
    }

    /* Phase 2: kernel flows from the Netlink dump. */
    while (!n_flows
           || (n_flows < max_flows && thread->nl_flows.size)) {
        struct dpif_netlink_flow datapath_flow;
        struct ofpbuf nl_flow;
        int error;

        /* Try to grab another flow. */
        if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
            break;
        }

        /* Convert the flow to our output format. */
        error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
        if (error) {
            atomic_store_relaxed(&dump->status, error);
            break;
        }

        if (dump->up.terse || datapath_flow.actions) {
            /* Common case: we don't want actions, or the flow includes
             * actions. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
        } else {
            /* Rare case: the flow does not include actions.  Retrieve this
             * individual flow again to get the actions. */
            error = dpif_netlink_flow_get(dpif, &datapath_flow,
                                          &datapath_flow, &thread->nl_actions);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
                continue;
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
                atomic_store_relaxed(&dump->status, error);
                break;
            }

            /* Save this flow.  Then exit, because we only have one buffer to
             * handle this case. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
            break;
        }
    }
    return n_flows;
}
1745
/* Appends to 'buf' an OVS_PACKET_CMD_EXECUTE Netlink request that asks the
 * datapath with ifindex 'dp_ifindex' to run 'd_exec->actions' on
 * 'd_exec->packet'. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve room up front: fixed headers (~64 bytes) plus the packet, its
     * metadata key, and the action list. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Packet metadata as a nested OVS_PACKET_ATTR_KEY attribute. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }

    /* Echo the packet hash back to the datapath if one was supplied. */
    if (d_exec->hash) {
        nl_msg_put_u64(buf, OVS_PACKET_ATTR_HASH, d_exec->hash);
    }
}
1785
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive). */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    /* One Netlink transaction plus its request/reply buffers per op. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Pass 1: encode each op as a Netlink request. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO makes the kernel send back the flow, from which
                 * the stats are extracted below. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->execute,
                                            &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Send all requests as one batched Netlink transaction. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: decode each reply back into its op. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
                                                   &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
1948
/* Handles a DPIF_OP_FLOW_GET against the netdev offload layer: looks up the
 * flow by 'get->ufid' on the offload-capable ports and, if found, fills in
 * 'get->flow'.  The flow's actions are copied into 'get->buffer' so that
 * they survive this function's stack buffers.  Returns 0 on success or a
 * positive errno value (e.g. ENOENT if no port has the flow). */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct dpif_flow_attrs attrs;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif->dpif.dpif_class, &match,
                                &actions, get->ufid, &stats, &attrs, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats, &attrs,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* Re-point the actions at 'get->buffer', which the caller owns, instead
     * of this function's stack storage. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
1988
1989 static int
1990 parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
1991 {
1992 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1993 const struct dpif_class *dpif_class = dpif->dpif.dpif_class;
1994 struct match match;
1995 odp_port_t in_port;
1996 const struct nlattr *nla;
1997 size_t left;
1998 struct netdev *dev;
1999 struct offload_info info;
2000 ovs_be16 dst_port = 0;
2001 uint8_t csum_on = false;
2002 int err;
2003
2004 if (put->flags & DPIF_FP_PROBE) {
2005 return EOPNOTSUPP;
2006 }
2007
2008 err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
2009 put->mask_len, &match);
2010 if (err) {
2011 return err;
2012 }
2013
2014 in_port = match.flow.in_port.odp_port;
2015 dev = netdev_ports_get(in_port, dpif_class);
2016 if (!dev) {
2017 return EOPNOTSUPP;
2018 }
2019
2020 /* Get tunnel dst port */
2021 NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
2022 if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
2023 const struct netdev_tunnel_config *tnl_cfg;
2024 struct netdev *outdev;
2025 odp_port_t out_port;
2026
2027 out_port = nl_attr_get_odp_port(nla);
2028 outdev = netdev_ports_get(out_port, dpif_class);
2029 if (!outdev) {
2030 err = EOPNOTSUPP;
2031 goto out;
2032 }
2033 tnl_cfg = netdev_get_tunnel_config(outdev);
2034 if (tnl_cfg && tnl_cfg->dst_port != 0) {
2035 dst_port = tnl_cfg->dst_port;
2036 }
2037 if (tnl_cfg) {
2038 csum_on = tnl_cfg->csum;
2039 }
2040 netdev_close(outdev);
2041 }
2042 }
2043
2044 info.dpif_class = dpif_class;
2045 info.tp_dst_port = dst_port;
2046 info.tunnel_csum_on = csum_on;
2047 err = netdev_flow_put(dev, &match,
2048 CONST_CAST(struct nlattr *, put->actions),
2049 put->actions_len,
2050 CONST_CAST(ovs_u128 *, put->ufid),
2051 &info, put->stats);
2052
2053 if (!err) {
2054 if (put->flags & DPIF_FP_MODIFY) {
2055 struct dpif_op *opp;
2056 struct dpif_op op;
2057
2058 op.type = DPIF_OP_FLOW_DEL;
2059 op.flow_del.key = put->key;
2060 op.flow_del.key_len = put->key_len;
2061 op.flow_del.ufid = put->ufid;
2062 op.flow_del.pmd_id = put->pmd_id;
2063 op.flow_del.stats = NULL;
2064 op.flow_del.terse = false;
2065
2066 opp = &op;
2067 dpif_netlink_operate__(dpif, &opp, 1);
2068 }
2069
2070 VLOG_DBG("added flow");
2071 } else if (err != EEXIST) {
2072 struct netdev *oor_netdev = NULL;
2073 enum vlog_level level;
2074 if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
2075 /*
2076 * We need to set OOR on the input netdev (i.e, 'dev') for the
2077 * flow. But if the flow has a tunnel attribute (i.e, decap action,
2078 * with a virtual device like a VxLAN interface as its in-port),
2079 * then lookup and set OOR on the underlying tunnel (real) netdev.
2080 */
2081 oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
2082 if (!oor_netdev) {
2083 /* Not a 'tunnel' flow */
2084 oor_netdev = dev;
2085 }
2086 netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
2087 }
2088 level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
2089 VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
2090 ovs_strerror(err),
2091 (oor_netdev ? oor_netdev->name : dev->name));
2092 }
2093
2094 out:
2095 if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
2096 /* Modified rule can't be offloaded, try and delete from HW */
2097 int del_err = netdev_flow_del(dev, put->ufid, put->stats);
2098
2099 if (!del_err) {
2100 /* Delete from hw success, so old flow was offloaded.
2101 * Change flags to create the flow in kernel */
2102 put->flags &= ~DPIF_FP_MODIFY;
2103 put->flags |= DPIF_FP_CREATE;
2104 } else if (del_err != ENOENT) {
2105 VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
2106 ovs_strerror(del_err));
2107 /* stop proccesing the flow in kernel */
2108 err = 0;
2109 }
2110 }
2111
2112 netdev_close(dev);
2113
2114 return err;
2115 }
2116
/* Attempts to apply 'op' via the netdev (hardware offload) layer instead of
 * the kernel datapath.  Only put/del/get operations with a UFID can be
 * offloaded.  Returns 0 on success or a positive errno value; EOPNOTSUPP
 * means the op was not handled here and should go to the kernel instead. */
static int
try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op)
{
    int err = EOPNOTSUPP;

    switch (op->type) {
    case DPIF_OP_FLOW_PUT: {
        struct dpif_flow_put *put = &op->flow_put;

        if (!put->ufid) {
            break;
        }

        log_flow_put_message(&dpif->dpif, &this_module, put, 0);
        err = parse_flow_put(dpif, put);
        break;
    }
    case DPIF_OP_FLOW_DEL: {
        struct dpif_flow_del *del = &op->flow_del;

        if (!del->ufid) {
            break;
        }

        log_flow_del_message(&dpif->dpif, &this_module, del, 0);
        err = netdev_ports_flow_del(dpif->dpif.dpif_class, del->ufid,
                                    del->stats);
        break;
    }
    case DPIF_OP_FLOW_GET: {
        struct dpif_flow_get *get = &op->flow_get;

        if (!op->flow_get.ufid) {
            break;
        }

        log_flow_get_message(&dpif->dpif, &this_module, get, 0);
        err = parse_flow_get(dpif, get);
        break;
    }
    case DPIF_OP_EXECUTE:
    default:
        break;
    }

    return err;
}
2164
/* Pushes 'n_ops' operations to the kernel datapath, batch by batch.
 * dpif_netlink_operate__() reports how many operations it consumed on
 * each call; loop until everything has been submitted. */
static void
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
                            size_t n_ops)
{
    for (size_t remaining = n_ops; remaining > 0; ) {
        size_t done = dpif_netlink_operate__(dpif, ops, remaining);

        ops += done;
        remaining -= done;
    }
}
2176
/* dpif 'operate' implementation: executes the 'n_ops' operations in 'ops'.
 *
 * Depending on 'offload_type', each operation is first offered to the
 * hardware-offload path (try_send_to_netdev()); operations that cannot be
 * offloaded are collected into 'new_ops' and forwarded to the kernel
 * datapath in batches, except when DPIF_OFFLOAD_ALWAYS forbids any kernel
 * fallback.  Results are reported through each op's 'error' member. */
static void
dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_op *new_ops[OPERATE_MAX_OPS];
    int count = 0;      /* Ops queued in 'new_ops' for the current chunk. */
    int i = 0;          /* Read cursor into 'ops'; never reset. */
    int err = 0;

    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        /* Caller demanded offload but the flow API is disabled; nothing we
         * can do, and the ops' 'error' members are left untouched. */
        VLOG_DBG("Invalid offload_type: %d", offload_type);
        return;
    }

    if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
        while (n_ops > 0) {
            count = 0;

            /* Fill one chunk of at most OPERATE_MAX_OPS kernel-bound ops. */
            while (n_ops > 0 && count < OPERATE_MAX_OPS) {
                struct dpif_op *op = ops[i++];

                err = try_send_to_netdev(dpif, op);
                if (err && err != EEXIST) {
                    if (offload_type == DPIF_OFFLOAD_ALWAYS) {
                        /* We got an error while offloading an op. Since
                         * OFFLOAD_ALWAYS is specified, we stop further
                         * processing and return to the caller without
                         * invoking kernel datapath as fallback. But the
                         * interface requires us to process all n_ops; so
                         * return the same error in the remaining ops too.
                         */
                        op->error = err;
                        n_ops--;
                        while (n_ops > 0) {
                            op = ops[i++];
                            op->error = err;
                            n_ops--;
                        }
                        return;
                    }
                    /* Offload failed; fall back to the kernel datapath. */
                    new_ops[count++] = op;
                } else {
                    /* Offloaded (or already present): record the result. */
                    op->error = err;
                }

                n_ops--;
            }

            dpif_netlink_operate_chunks(dpif, new_ops, count);
        }
    } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
        /* Offload disabled or unavailable: everything goes to the kernel. */
        dpif_netlink_operate_chunks(dpif, ops, n_ops);
    }
}
2232
#if _WIN32
/* Windows: each handler owns a pool of vport sockets instead of an epoll
 * descriptor. */
static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    vport_delete_sock_pool(handler);
}

static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    return vport_create_sock_pool(handler);
}
#else

/* Linux: each upcall handler multiplexes its per-port netlink sockets
 * through one epoll descriptor.  Returns 0 or a positive errno value. */
static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    /* The size hint (10) is ignored by modern kernels; it only needs to be
     * positive. */
    handler->epoll_fd = epoll_create(10);
    return handler->epoll_fd < 0 ? errno : 0;
}

static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    close(handler->epoll_fd);
}
#endif
2260
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * Also (re)creates the handler array when the requested 'n_handlers'
 * differs from the current one, and ensures every vport's upcall PID in
 * the kernel matches its channel's netlink socket.
 *
 * Returns 0 on success; on partial failure returns the last significant
 * errno value while still processing the remaining ports. */
static int
dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;
    struct dpif_netlink_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* Windows supports at most one handler (see the recv path). */
    ovs_assert(!WINDOWS || n_handlers <= 1);
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    if (dpif->n_handlers != n_handlers) {
        /* Handler count changed: rebuild the handler array from scratch. */
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            int error;
            struct dpif_handler *handler = &dpif->handlers[i];

            error = dpif_netlink_handler_init(handler);
            if (error) {
                size_t j;

                /* Roll back the handlers initialized so far. */
                for (j = 0; j < i; j++) {
                    struct dpif_handler *tmp = &dpif->handlers[j];
                    dpif_netlink_handler_uninit(tmp);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return error;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    /* Drop any buffered epoll events; they may refer to stale channels. */
    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    /* Bit per existing port: set when the kernel dump confirms the port
     * still exists, so its channel must be kept. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_port_dump_start__(dpif, &dump);
    while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t upcall_pid;
        int error;

        if (port_no >= dpif->uc_array_size
            || !vport_get_pid(dpif, port_no, &upcall_pid)) {
            /* Kernel port with no channel yet: create one. */
            struct nl_sock *sock;
            error = create_nl_sock(dpif, &sock);

            if (error) {
                goto error;
            }

            error = vport_add_channel(dpif, vport.port_no, sock);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                nl_sock_destroy(sock);
                retval = error;
                goto error;
            }
            upcall_pid = nl_sock_pid(sock);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != 1
            || upcall_pid != vport.upcall_pids[0]) {
            struct dpif_netlink_vport vport_request;

            dpif_netlink_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = 1;
            vport_request.upcall_pids = &upcall_pid;
            error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is. Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        continue;

    error:
        /* Per-port failure: drop this port's channel and keep dumping. */
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
2391
2392 static int
2393 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
2394 OVS_REQ_WRLOCK(dpif->upcall_lock)
2395 {
2396 if ((dpif->handlers != NULL) == enable) {
2397 return 0;
2398 } else if (!enable) {
2399 destroy_all_channels(dpif);
2400 return 0;
2401 } else {
2402 return dpif_netlink_refresh_channels(dpif, 1);
2403 }
2404 }
2405
2406 static int
2407 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
2408 {
2409 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2410 int error;
2411
2412 fat_rwlock_wrlock(&dpif->upcall_lock);
2413 error = dpif_netlink_recv_set__(dpif, enable);
2414 fat_rwlock_unlock(&dpif->upcall_lock);
2415
2416 return error;
2417 }
2418
/* dpif 'handlers_set' implementation: resizes the set of upcall handler
 * threads to 'n_handlers'.  Only takes effect while upcall reception is
 * enabled (dpif->handlers != NULL); otherwise it is a no-op that returns
 * 0.  Returns 0 or a positive errno value. */
static int
dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error = 0;

#ifdef _WIN32
    /* Multiple upcall handlers will be supported once kernel datapath supports
     * it. */
    if (n_handlers > 1) {
        return error;
    }
#endif

    fat_rwlock_wrlock(&dpif->upcall_lock);
    if (dpif->handlers) {
        error = dpif_netlink_refresh_channels(dpif, n_handlers);
    }
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
2441
2442 static int
2443 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
2444 uint32_t queue_id, uint32_t *priority)
2445 {
2446 if (queue_id < 0xf000) {
2447 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
2448 return 0;
2449 } else {
2450 return EINVAL;
2451 }
2452 }
2453
/* Parses the OVS_PACKET netlink message in 'buf' into '*upcall' and stores
 * the originating datapath's ifindex in '*dp_ifindex'.
 *
 * On success every field of '*upcall' is (re)set and 0 is returned; the
 * upcall's packet and attribute pointers alias 'buf', so 'buf' must outlive
 * '*upcall'.  Returns EINVAL for malformed or unexpected messages. */
static int
parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
                 struct dpif_upcall *upcall, int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
        [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true },
        [OVS_PACKET_ATTR_HASH] = { .type = NL_A_U64, .optional = true }
    };

    /* Peel the netlink, genl, and OVS headers off a read-only view of 'buf'. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    /* Only MISS and ACTION upcalls are expected from the datapath. */
    int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
                : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
                : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    /* The UFID is synthesized from the flow key hash. */
    dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
    upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
    upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
    upcall->mru = a[OVS_PACKET_ATTR_MRU];
    upcall->hash = a[OVS_PACKET_ATTR_HASH];

    /* Allow overwriting the netlink attribute header without reallocating. */
    dp_packet_use_stub(&upcall->packet,
                       CONST_CAST(struct nlattr *,
                                  nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                       nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                       sizeof(struct nlattr));
    dp_packet_set_data(&upcall->packet,
                       (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
    dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
        /* Ethernet frame */
        upcall->packet.packet_type = htonl(PT_ETH);
    } else {
        /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
        ovs_be16 ethertype = 0;
        const struct nlattr *et_nla = nl_attr_find__(upcall->key,
                                                     upcall->key_len,
                                                     OVS_KEY_ATTR_ETHERTYPE);
        if (et_nla) {
            ethertype = nl_attr_get_be16(et_nla);
        }
        upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                    ntohs(ethertype));
        dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
    }

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
2535
2536 #ifdef _WIN32
2537 #define PACKET_RECV_BATCH_SIZE 50
/* Windows upcall receive path: polls each socket in the (single) handler's
 * vport socket pool for one upcall.  Returns 0 with '*upcall' filled in,
 * EAGAIN when nothing is pending (or the handler setup is invalid), or
 * another positive errno value on error. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the work done per call so one busy socket cannot
             * starve the caller. */
            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    /* This socket drained; move on to the next one. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            /* Silently drop well-formed upcalls from other datapaths. */
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
2602 #else
/* Linux upcall receive path for one handler thread: harvests ready channels
 * from the handler's epoll set (non-blocking; timeout 0) and reads one
 * upcall.  Returns 0 with '*upcall' filled in, EAGAIN when nothing is
 * pending, or another positive errno value on error. */
static int
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
                    struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        /* Event queue exhausted: refill it with one non-blocking poll. */
        int retval;

        handler->event_offset = handler->n_events = 0;

        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* epoll event data carries the channel index (see channel setup). */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the reads per call so one busy channel cannot starve
             * the caller. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    /* This channel drained; move on to the next event. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            /* Silently drop well-formed upcalls from other datapaths. */
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
2677 #endif
2678
/* dpif 'recv' implementation: takes the upcall read-lock and dispatches to
 * the platform-specific receive routine.  Returns 0, EAGAIN, or errno. */
static int
dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
                  struct dpif_upcall *upcall, struct ofpbuf *buf)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error;

    fat_rwlock_rdlock(&dpif->upcall_lock);
#ifdef _WIN32
    error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
#else
    error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
#endif
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
2696
2697 static void
2698 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2699 OVS_REQ_RDLOCK(dpif->upcall_lock)
2700 {
2701 #ifdef _WIN32
2702 uint32_t i;
2703 struct dpif_windows_vport_sock *sock_pool =
2704 dpif->handlers[handler_id].vport_sock_pool;
2705
2706 /* Only one handler is supported currently. */
2707 if (handler_id >= 1) {
2708 return;
2709 }
2710
2711 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2712 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2713 }
2714 #else
2715 if (dpif->handlers && handler_id < dpif->n_handlers) {
2716 struct dpif_handler *handler = &dpif->handlers[handler_id];
2717
2718 poll_fd_wait(handler->epoll_fd, POLLIN);
2719 }
2720 #endif
2721 }
2722
2723 static void
2724 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2725 {
2726 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2727
2728 fat_rwlock_rdlock(&dpif->upcall_lock);
2729 dpif_netlink_recv_wait__(dpif, handler_id);
2730 fat_rwlock_unlock(&dpif->upcall_lock);
2731 }
2732
2733 static void
2734 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2735 OVS_REQ_WRLOCK(dpif->upcall_lock)
2736 {
2737 if (dpif->handlers) {
2738 size_t i;
2739
2740 if (!dpif->channels[0].sock) {
2741 return;
2742 }
2743 for (i = 0; i < dpif->uc_array_size; i++ ) {
2744
2745 nl_sock_drain(dpif->channels[i].sock);
2746 }
2747 }
2748 }
2749
2750 static void
2751 dpif_netlink_recv_purge(struct dpif *dpif_)
2752 {
2753 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2754
2755 fat_rwlock_wrlock(&dpif->upcall_lock);
2756 dpif_netlink_recv_purge__(dpif);
2757 fat_rwlock_unlock(&dpif->upcall_lock);
2758 }
2759
/* Returns a malloc'd string with the kernel openvswitch module version as
 * read from sysfs, or NULL if it cannot be determined (non-Linux build, or
 * the sysfs file is absent/unreadable).  The caller owns the result. */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE  "/sys/module/openvswitch/version"
    FILE *file = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (file) {
        char line[MAX_VERSION_STR_SIZE];

        if (fgets(line, sizeof line, file)) {
            /* Strip the trailing newline, if any. */
            char *nl = strchr(line, '\n');

            if (nl) {
                *nl = '\0';
            }
            version_str = xstrdup(line);
        }
        fclose(file);
    }
#endif

    return version_str;
}
2789
/* Per-dump state for conntrack entry dumps: the generic ct_dpif dump state
 * ('up', handed back to callers) plus the underlying netlink-conntrack
 * dump handle. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;        /* Embedded generic dump state. */
    struct nl_ct_dump_state *nl_ct_dump; /* netlink-conntrack dump handle. */
};
2794
2795 static int
2796 dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
2797 struct ct_dpif_dump_state **dump_,
2798 const uint16_t *zone, int *ptot_bkts)
2799 {
2800 struct dpif_netlink_ct_dump_state *dump;
2801 int err;
2802
2803 dump = xzalloc(sizeof *dump);
2804 err = nl_ct_dump_start(&dump->nl_ct_dump, zone, ptot_bkts);
2805 if (err) {
2806 free(dump);
2807 return err;
2808 }
2809
2810 *dump_ = &dump->up;
2811
2812 return 0;
2813 }
2814
2815 static int
2816 dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
2817 struct ct_dpif_dump_state *dump_,
2818 struct ct_dpif_entry *entry)
2819 {
2820 struct dpif_netlink_ct_dump_state *dump;
2821
2822 INIT_CONTAINER(dump, dump_, up);
2823
2824 return nl_ct_dump_next(dump->nl_ct_dump, entry);
2825 }
2826
2827 static int
2828 dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
2829 struct ct_dpif_dump_state *dump_)
2830 {
2831 struct dpif_netlink_ct_dump_state *dump;
2832
2833 INIT_CONTAINER(dump, dump_, up);
2834
2835 int err = nl_ct_dump_done(dump->nl_ct_dump);
2836 free(dump);
2837 return err;
2838 }
2839
2840 static int
2841 dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone,
2842 const struct ct_dpif_tuple *tuple)
2843 {
2844 if (tuple) {
2845 return nl_ct_flush_tuple(tuple, zone ? *zone : 0);
2846 } else if (zone) {
2847 return nl_ct_flush_zone(*zone);
2848 } else {
2849 return nl_ct_flush();
2850 }
2851 }
2852
2853 static int
2854 dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED,
2855 const uint32_t *default_limits,
2856 const struct ovs_list *zone_limits)
2857 {
2858 struct ovs_zone_limit req_zone_limit;
2859
2860 if (ovs_ct_limit_family < 0) {
2861 return EOPNOTSUPP;
2862 }
2863
2864 struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
2865 nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
2866 NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET,
2867 OVS_CT_LIMIT_VERSION);
2868
2869 struct ovs_header *ovs_header;
2870 ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
2871 ovs_header->dp_ifindex = 0;
2872
2873 size_t opt_offset;
2874 opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
2875 if (default_limits) {
2876 req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
2877 req_zone_limit.limit = *default_limits;
2878 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2879 }
2880
2881 if (!ovs_list_is_empty(zone_limits)) {
2882 struct ct_dpif_zone_limit *zone_limit;
2883
2884 LIST_FOR_EACH (zone_limit, node, zone_limits) {
2885 req_zone_limit.zone_id = zone_limit->zone;
2886 req_zone_limit.limit = zone_limit->limit;
2887 nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
2888 }
2889 }
2890 nl_msg_end_nested(request, opt_offset);
2891
2892 int err = nl_transact(NETLINK_GENERIC, request, NULL);
2893 ofpbuf_delete(request);
2894 return err;
2895 }
2896
/* Parses an OVS_CT_LIMIT reply in 'buf': stores the default-zone limit in
 * '*default_limit' (if present in the reply) and appends each valid
 * per-zone limit to 'zone_limits'.  Returns 0 on success or EINVAL for a
 * malformed message. */
static int
dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf,
                                     uint32_t *default_limit,
                                     struct ovs_list *zone_limits)
{
    static const struct nl_policy ovs_ct_limit_policy[] = {
        [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED,
                                           .optional = true },
    };

    /* Peel the netlink, genl, and OVS headers off a read-only view. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)];

    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_ct_limit_family
        || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr,
                            ARRAY_SIZE(ovs_ct_limit_policy))) {
        return EINVAL;
    }


    if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
        return EINVAL;
    }

    /* Walk the flat array of 'struct ovs_zone_limit' entries nested in
     * the attribute, one aligned record at a time. */
    int rem = NLA_ALIGN(
                nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]));
    const struct ovs_zone_limit *zone_limit =
                nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]);

    while (rem >= sizeof *zone_limit) {
        if (zone_limit->zone_id == OVS_ZONE_LIMIT_DEFAULT_ZONE) {
            *default_limit = zone_limit->limit;
        } else if (zone_limit->zone_id < OVS_ZONE_LIMIT_DEFAULT_ZONE ||
                   zone_limit->zone_id > UINT16_MAX) {
            /* Zone id out of the valid 16-bit range: skip the record. */
        } else {
            ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id,
                                    zone_limit->limit, zone_limit->count);
        }
        rem -= NLA_ALIGN(sizeof *zone_limit);
        zone_limit = ALIGNED_CAST(struct ovs_zone_limit *,
            (unsigned char *) zone_limit  + NLA_ALIGN(sizeof *zone_limit));
    }
    return 0;
}
2946
/* Implements the ct_get_limits dpif op: queries the OVS_CT_LIMIT family
 * for the default limit (stored in '*default_limit') and for each zone in
 * 'zone_limits_request' (results appended to 'zone_limits_reply'; an empty
 * request queries all zones).  Returns 0, EOPNOTSUPP when the kernel lacks
 * the family, or another positive errno value. */
static int
dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED,
                           uint32_t *default_limit,
                           const struct ovs_list *zone_limits_request,
                           struct ovs_list *zone_limits_reply)
{
    if (ovs_ct_limit_family < 0) {
        return EOPNOTSUPP;
    }

    struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
    nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
                          NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET,
                          OVS_CT_LIMIT_VERSION);

    struct ovs_header *ovs_header;
    ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    if (!ovs_list_is_empty(zone_limits_request)) {
        size_t opt_offset = nl_msg_start_nested(request,
                                                OVS_CT_LIMIT_ATTR_ZONE_LIMIT);

        /* Always ask for the default zone first, then the explicit zones. */
        struct ovs_zone_limit req_zone_limit;
        req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
        nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);

        struct ct_dpif_zone_limit *zone_limit;
        LIST_FOR_EACH (zone_limit, node, zone_limits_request) {
            req_zone_limit.zone_id = zone_limit->zone;
            nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
        }

        nl_msg_end_nested(request, opt_offset);
    }

    struct ofpbuf *reply;
    int err = nl_transact(NETLINK_GENERIC, request, &reply);
    if (err) {
        /* NOTE(review): the error path deletes 'reply' below, so this
         * relies on nl_transact() setting '*replyp' to NULL on failure --
         * confirm against netlink-socket. */
        goto out;
    }

    err = dpif_netlink_zone_limits_from_ofpbuf(reply, default_limit,
                                               zone_limits_reply);

out:
    ofpbuf_delete(request);
    ofpbuf_delete(reply);
    return err;
}
2997
/* Implements the ct_del_limits dpif op: removes the per-zone connection
 * limits listed in 'zone_limits' via the OVS_CT_LIMIT family.  Returns 0,
 * EOPNOTSUPP when the kernel lacks the family, or another positive errno
 * value. */
static int
dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED,
                           const struct ovs_list *zone_limits)
{
    if (ovs_ct_limit_family < 0) {
        return EOPNOTSUPP;
    }

    struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
    nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
                          NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL,
                          OVS_CT_LIMIT_VERSION);

    struct ovs_header *ovs_header;
    ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    if (!ovs_list_is_empty(zone_limits)) {
        size_t opt_offset =
            nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);

        /* Only zone ids are needed to delete; limits are ignored. */
        struct ct_dpif_zone_limit *zone_limit;
        LIST_FOR_EACH (zone_limit, node, zone_limits) {
            struct ovs_zone_limit req_zone_limit;
            req_zone_limit.zone_id = zone_limit->zone;
            nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
        }
        nl_msg_end_nested(request, opt_offset);
    }

    int err = nl_transact(NETLINK_GENERIC, request, NULL);

    ofpbuf_delete(request);
    return err;
}
3033
/* Prefix for the kernel conntrack timeout-policy names OVS creates. */
#define NL_TP_NAME_PREFIX "ovs_tp_"

/* An (address family, transport protocol) pair for which a kernel timeout
 * policy object is maintained. */
struct dpif_netlink_timeout_policy_protocol {
    uint16_t l3num;    /* Address family (AF_INET / AF_INET6). */
    uint8_t l4num;     /* IP protocol number (IPPROTO_*). */
};

/* Indexes into tp_protos[]: the protocol combinations supported here. */
enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
    DPIF_NL_TP_AF_INET_TCP,
    DPIF_NL_TP_AF_INET_UDP,
    DPIF_NL_TP_AF_INET_ICMP,
    DPIF_NL_TP_AF_INET6_TCP,
    DPIF_NL_TP_AF_INET6_UDP,
    DPIF_NL_TP_AF_INET6_ICMPV6,
    DPIF_NL_TP_MAX
};

/* Bitmask with one bit set per supported protocol combination. */
#define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)


/* The concrete (l3num, l4num) pair for each enum value above. */
static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
    [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
    [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
                                     .l4num = IPPROTO_ICMPV6 },
};
3063
/* Builds the kernel timeout-policy name for policy 'id' and the given
 * protocol pair, e.g. "ovs_tp_1_tcp4", storing a malloc'd string in
 * '*tp_name' (caller frees).  ICMPv6 carries no "6" suffix because its
 * protocol name is already v6-specific. */
static void
dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
                            char **tp_name)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
    ct_dpif_format_ipproto(&ds, l4num);

    if (l3num == AF_INET) {
        ds_put_cstr(&ds, "4");
    } else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
        ds_put_cstr(&ds, "6");
    }

    /* The kernel limits timeout-policy names; the formats above must fit. */
    ovs_assert(ds.length < CTNL_TIMEOUT_NAME_MAX);

    *tp_name = ds_steal_cstr(&ds);
}
3082
3083 static int
3084 dpif_netlink_ct_get_timeout_policy_name(struct dpif *dpif OVS_UNUSED,
3085 uint32_t tp_id, uint16_t dl_type,
3086 uint8_t nw_proto, char **tp_name,
3087 bool *is_generic)
3088 {
3089 dpif_netlink_format_tp_name(tp_id,
3090 dl_type == ETH_TYPE_IP ? AF_INET : AF_INET6,
3091 nw_proto, tp_name);
3092 *is_generic = false;
3093 return 0;
3094 }
3095
/* X-macro tables mapping ct_dpif timeout attributes (CT_DPIF_TP_ATTR_*)
 * to the kernel's nfnetlink_cttimeout attributes (CTA_TIMEOUT_*), one
 * CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2) invocation per
 * attribute pair.  The MAPPING macro itself is (re)defined before each
 * expansion site below. */
#define CT_DPIF_NL_TP_TCP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE)                   \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS)            \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)

#define CT_DPIF_NL_TP_UDP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED)              \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)

#define CT_DPIF_NL_TP_ICMP_MAPPINGS                                 \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)

#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS                               \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)


/* Forward direction: copy each attribute present in the ct_dpif policy
 * 'tp' into the kernel policy 'nl_tp'. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)         \
if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {      \
    nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2;          \
    nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] =                  \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1];              \
}
3126
/* Expands the TCP mapping table: ct_dpif policy -> kernel policy. */
static void
dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Expands the UDP mapping table: ct_dpif policy -> kernel policy. */
static void
dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Expands the ICMP mapping table: ct_dpif policy -> kernel policy. */
static void
dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
                                  struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Expands the ICMPv6 mapping table: ct_dpif policy -> kernel policy. */
static void
dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
                                    struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
3156
3157 static void
3158 dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp,
3159 uint8_t l4num, struct nl_ct_timeout_policy *nl_tp)
3160 {
3161 nl_tp->present = 0;
3162
3163 if (l4num == IPPROTO_TCP) {
3164 dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp);
3165 } else if (l4num == IPPROTO_UDP) {
3166 dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp);
3167 } else if (l4num == IPPROTO_ICMP) {
3168 dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp);
3169 } else if (l4num == IPPROTO_ICMPV6) {
3170 dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp);
3171 }
3172 }
3173
/* Reverse direction: copy each attribute present in the kernel policy
 * 'nl_tp' back into the ct_dpif policy 'tp', warning (rate-limited) when
 * both sides carry the attribute with different values. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)                 \
if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) {               \
    if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {          \
        if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] !=                \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) {                 \
            VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s "       \
                         "attribute %s=%"PRIu32" while %s=%"PRIu32,         \
                         nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2,       \
                         nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2],      \
                         "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1,                \
                         tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]);    \
        }                                                                   \
    } else {                                                                \
        tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1;             \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] =                     \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2];                   \
    }                                                                       \
}

/* Expands the TCP mapping table: kernel policy -> ct_dpif policy. */
static void
dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Expands the UDP mapping table: kernel policy -> ct_dpif policy. */
static void
dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Expands the ICMP mapping table: kernel policy -> ct_dpif policy. */
static void
dpif_netlink_set_ct_dpif_tp_icmp_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Expands the ICMPv6 mapping table: kernel policy -> ct_dpif policy. */
static void
dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
3224
3225 static void
3226 dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp,
3227 struct ct_dpif_timeout_policy *tp)
3228 {
3229 if (nl_tp->l4num == IPPROTO_TCP) {
3230 dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp);
3231 } else if (nl_tp->l4num == IPPROTO_UDP) {
3232 dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp);
3233 } else if (nl_tp->l4num == IPPROTO_ICMP) {
3234 dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp);
3235 } else if (nl_tp->l4num == IPPROTO_ICMPV6) {
3236 dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp);
3237 }
3238 }
3239
3240 #ifdef _WIN32
3241 static int
3242 dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
3243 const struct ct_dpif_timeout_policy *tp)
3244 {
3245 return EOPNOTSUPP;
3246 }
3247
3248 static int
3249 dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
3250 uint32_t tp_id,
3251 struct ct_dpif_timeout_policy *tp)
3252 {
3253 return EOPNOTSUPP;
3254 }
3255
3256 static int
3257 dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
3258 uint32_t tp_id)
3259 {
3260 return EOPNOTSUPP;
3261 }
3262
3263 static int
3264 dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
3265 void **statep)
3266 {
3267 return EOPNOTSUPP;
3268 }
3269
3270 static int
3271 dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
3272 void *state,
3273 struct ct_dpif_timeout_policy **tp)
3274 {
3275 return EOPNOTSUPP;
3276 }
3277
3278 static int
3279 dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
3280 void *state)
3281 {
3282 return EOPNOTSUPP;
3283 }
3284 #else
3285 static int
3286 dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
3287 const struct ct_dpif_timeout_policy *tp)
3288 {
3289 int err = 0;
3290
3291 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3292 struct nl_ct_timeout_policy nl_tp;
3293 char *nl_tp_name;
3294
3295 dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num,
3296 tp_protos[i].l4num, &nl_tp_name);
3297 ovs_strlcpy(nl_tp.name, nl_tp_name, sizeof nl_tp.name);
3298 free(nl_tp_name);
3299
3300 nl_tp.l3num = tp_protos[i].l3num;
3301 nl_tp.l4num = tp_protos[i].l4num;
3302 dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp);
3303 err = nl_ct_set_timeout_policy(&nl_tp);
3304 if (err) {
3305 VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)",
3306 nl_tp.name, ovs_strerror(err));
3307 goto out;
3308 }
3309 }
3310
3311 out:
3312 return err;
3313 }
3314
3315 static int
3316 dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
3317 uint32_t tp_id,
3318 struct ct_dpif_timeout_policy *tp)
3319 {
3320 int err = 0;
3321
3322 tp->id = tp_id;
3323 tp->present = 0;
3324 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3325 struct nl_ct_timeout_policy nl_tp;
3326 char *nl_tp_name;
3327
3328 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3329 tp_protos[i].l4num, &nl_tp_name);
3330 err = nl_ct_get_timeout_policy(nl_tp_name, &nl_tp);
3331
3332 if (err) {
3333 VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)",
3334 nl_tp_name, ovs_strerror(err));
3335 free(nl_tp_name);
3336 goto out;
3337 }
3338 free(nl_tp_name);
3339 dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp);
3340 }
3341
3342 out:
3343 return err;
3344 }
3345
3346 /* Returns 0 if all the sub timeout policies are deleted or not exist in the
3347 * kernel. Returns 1 if any sub timeout policy deletion failed. */
3348 static int
3349 dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
3350 uint32_t tp_id)
3351 {
3352 int ret = 0;
3353
3354 for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
3355 char *nl_tp_name;
3356 dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
3357 tp_protos[i].l4num, &nl_tp_name);
3358 int err = nl_ct_del_timeout_policy(nl_tp_name);
3359 if (err == ENOENT) {
3360 err = 0;
3361 }
3362 if (err) {
3363 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6);
3364 VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)",
3365 nl_tp_name, ovs_strerror(err));
3366 ret = 1;
3367 }
3368 free(nl_tp_name);
3369 }
3370
3371 return ret;
3372 }
3373
/* State for an in-progress timeout-policy dump: wraps the kernel-level dump
 * and accumulates per-protocol pieces until whole policies are assembled. */
struct dpif_netlink_ct_timeout_policy_dump_state {
    struct nl_ct_timeout_policy_dump_state *nl_dump_state; /* Kernel dump. */
    struct hmap tp_dump_map;    /* Maps dpif tp id to partially assembled
                                 * 'struct dpif_netlink_tp_dump_node'. */
};
3378
/* One partially assembled timeout policy accumulated during a dump. */
struct dpif_netlink_tp_dump_node {
    struct hmap_node hmap_node; /* node in tp_dump_map. */
    struct ct_dpif_timeout_policy *tp;  /* Heap-allocated; owned by node. */
    uint32_t l3_l4_present;     /* Bit i is set once tp_protos[i]'s
                                 * sub-policy has been merged into 'tp'. */
};
3384
/* Looks up the dump node for timeout policy 'tp_id' in 'tp_dump_map'.
 * Returns NULL if no node for that id has been created yet. */
static struct dpif_netlink_tp_dump_node *
get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id,
                                       struct hmap *tp_dump_map)
{
    struct dpif_netlink_tp_dump_node *tp_dump_node;

    /* Nodes are hashed on the policy id alone (hash_int(tp_id, 0)), so
     * compare ids to resolve hash collisions. */
    HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0),
                             tp_dump_map) {
        if (tp_dump_node->tp->id == tp_id) {
            return tp_dump_node;
        }
    }
    return NULL;
}
3399
3400 static void
3401 update_dpif_netlink_tp_dump_node(
3402 const struct nl_ct_timeout_policy *nl_tp,
3403 struct dpif_netlink_tp_dump_node *tp_dump_node)
3404 {
3405 dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp);
3406 for (int i = 0; i < DPIF_NL_TP_MAX; ++i) {
3407 if (nl_tp->l3num == tp_protos[i].l3num &&
3408 nl_tp->l4num == tp_protos[i].l4num) {
3409 tp_dump_node->l3_l4_present |= 1 << i;
3410 break;
3411 }
3412 }
3413 }
3414
3415 static int
3416 dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
3417 void **statep)
3418 {
3419 struct dpif_netlink_ct_timeout_policy_dump_state *dump_state;
3420
3421 *statep = dump_state = xzalloc(sizeof *dump_state);
3422 int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state);
3423 if (err) {
3424 free(dump_state);
3425 return err;
3426 }
3427 hmap_init(&dump_state->tp_dump_map);
3428 return 0;
3429 }
3430
3431 static void
3432 get_and_cleanup_tp_dump_node(struct hmap *hmap,
3433 struct dpif_netlink_tp_dump_node *tp_dump_node,
3434 struct ct_dpif_timeout_policy *tp)
3435 {
3436 hmap_remove(hmap, &tp_dump_node->hmap_node);
3437 *tp = *tp_dump_node->tp;
3438 free(tp_dump_node->tp);
3439 free(tp_dump_node);
3440 }
3441
/* Produces the next complete timeout policy of the dump into 'tp'.
 * Returns 0 when 'tp' was filled, EOF when the dump is exhausted, or
 * another positive errno value on error.
 *
 * The kernel stores one sub-policy per L3/L4 protocol pair, so this
 * accumulates sub-policies in 'tp_dump_map' and emits a policy only once
 * all of its pieces have been seen (or, at EOF, emits the leftovers). */
static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy *tp)
{
    struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
    struct dpif_netlink_tp_dump_node *tp_dump_node;
    int err;

    /* Dumps all the timeout policies in the kernel. */
    do {
        struct nl_ct_timeout_policy nl_tp;
        uint32_t tp_id;

        err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state,
                                             &nl_tp);
        if (err) {
            break;
        }

        /* We are only interested in OVS-installed timeout policies, whose
         * names carry the NL_TP_NAME_PREFIX followed by the policy id. */
        if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) {
            continue;
        }

        tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id(
                            tp_id, &dump_state->tp_dump_map);
        if (!tp_dump_node) {
            /* First sub-policy seen for this id: start accumulating. */
            tp_dump_node = xzalloc(sizeof *tp_dump_node);
            tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp);
            tp_dump_node->tp->id = tp_id;
            hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node,
                        hash_int(tp_id, 0));
        }

        update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node);

        /* Returns one ct_dpif_timeout_policy if we gather all the L3/L4
         * sub-pieces. */
        if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) {
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            break;
        }
    } while (true);

    /* Dump the incomplete timeout policies: at EOF, flush accumulated
     * partial policies one per call until the map is empty. */
    if (err == EOF) {
        if (!hmap_is_empty(&dump_state->tp_dump_map)) {
            struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map);
            tp_dump_node = CONTAINER_OF(hmap_node,
                                        struct dpif_netlink_tp_dump_node,
                                        hmap_node);
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            return 0;
        }
    }

    return err;
}
3503
/* Finishes a timeout-policy dump: ends the kernel dump and frees any
 * partially assembled policies still in the map.  Returns the status of
 * the kernel-level dump termination. */
static int
dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
                                         void *state)
{
    struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
    struct dpif_netlink_tp_dump_node *tp_dump_node;

    int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state);
    /* Release nodes for policies that never became complete. */
    HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) {
        free(tp_dump_node->tp);
        free(tp_dump_node);
    }
    hmap_destroy(&dump_state->tp_dump_map);
    free(dump_state);
    return err;
}
3520 #endif
3521
3522 \f
3523 /* Meters */
3524
/* Set of OpenFlow 1.3 meter flags supported by the kernel datapath. */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Meter support was introduced in Linux 4.15.  In some versions of
 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
 * when the meter was created, so all meters essentially had an id of
 * zero.  Check for that condition and disable meters on those kernels. */
static bool probe_broken_meters(struct dpif *);
3534
3535 static void
3536 dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf,
3537 void *stub, size_t size, uint32_t command)
3538 {
3539 ofpbuf_use_stub(buf, stub, size);
3540
3541 nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO,
3542 command, OVS_METER_VERSION);
3543
3544 struct ovs_header *ovs_header;
3545 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
3546 ovs_header->dp_ifindex = dpif->dp_ifindex;
3547 }
3548
3549 /* Execute meter 'request' in the kernel datapath. If the command
3550 * fails, returns a positive errno value. Otherwise, stores the reply
3551 * in '*replyp', parses the policy according to 'reply_policy' into the
3552 * array of Netlink attribute in 'a', and returns 0. On success, the
3553 * caller is responsible for calling ofpbuf_delete() on '*replyp'
3554 * ('replyp' will contain pointers into 'a'). */
3555 static int
3556 dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp,
3557 const struct nl_policy *reply_policy,
3558 struct nlattr **a, size_t size_a)
3559 {
3560 int error = nl_transact(NETLINK_GENERIC, request, replyp);
3561 ofpbuf_uninit(request);
3562
3563 if (error) {
3564 return error;
3565 }
3566
3567 struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg);
3568 struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl);
3569 struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp,
3570 sizeof *ovs_header);
3571 if (!nlmsg || !genl || !ovs_header
3572 || nlmsg->nlmsg_type != ovs_meter_family
3573 || !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) {
3574 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3575 VLOG_DBG_RL(&rl,
3576 "Kernel module response to meter tranaction is invalid");
3577 return EINVAL;
3578 }
3579 return 0;
3580 }
3581
3582 static void
3583 dpif_netlink_meter_get_features(const struct dpif *dpif_,
3584 struct ofputil_meter_features *features)
3585 {
3586 if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) {
3587 features = NULL;
3588 return;
3589 }
3590
3591 struct ofpbuf buf, *msg;
3592 uint64_t stub[1024 / 8];
3593
3594 static const struct nl_policy ovs_meter_features_policy[] = {
3595 [OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 },
3596 [OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 },
3597 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3598 };
3599 struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)];
3600
3601 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3602 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub,
3603 OVS_METER_CMD_FEATURES);
3604 if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a,
3605 ARRAY_SIZE(ovs_meter_features_policy))) {
3606 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3607 VLOG_INFO_RL(&rl,
3608 "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
3609 return;
3610 }
3611
3612 features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]);
3613 features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]);
3614
3615 /* Bands is a nested attribute of zero or more nested
3616 * band attributes. */
3617 if (a[OVS_METER_ATTR_BANDS]) {
3618 const struct nlattr *nla;
3619 size_t left;
3620
3621 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3622 const struct nlattr *band_nla;
3623 size_t band_left;
3624
3625 NL_NESTED_FOR_EACH (band_nla, band_left, nla) {
3626 if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) {
3627 if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) {
3628 switch (nl_attr_get_u32(band_nla)) {
3629 case OVS_METER_BAND_TYPE_DROP:
3630 features->band_types |= 1 << OFPMBT13_DROP;
3631 break;
3632 }
3633 }
3634 }
3635 }
3636 }
3637 }
3638 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
3639
3640 ofpbuf_delete(msg);
3641 }
3642
/* Installs or updates kernel meter 'meter_id' from 'config'.  Returns 0 on
 * success; EBADF for unsupported flags, ENODEV for unsupported band types,
 * or another positive errno value if the netlink transaction fails. */
static int
dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id,
                         struct ofputil_meter_config *config)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct ofpbuf buf, *msg;
    uint64_t stub[1024 / 8];

    /* The reply only needs to echo the meter id back to us. */
    static const struct nl_policy ovs_meter_set_response_policy[] = {
        [OVS_METER_ATTR_ID] = { .type = NL_A_U32 },
    };
    struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)];

    /* Validate the whole config before building the request. */
    if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) {
        return EBADF; /* Unsupported flags set */
    }

    for (size_t i = 0; i < config->n_bands; i++) {
        switch (config->bands[i].type) {
        case OFPMBT13_DROP:
            break;
        default:
            return ENODEV; /* Unsupported band type */
        }
    }

    dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET);

    nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);

    if (config->flags & OFPMF13_KBPS) {
        nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS);
    }

    size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS);
    /* Bands */
    for (size_t i = 0; i < config->n_bands; ++i) {
        struct ofputil_meter_band * band = &config->bands[i];
        uint32_t band_type;

        size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC);

        switch (band->type) {
        case OFPMBT13_DROP:
            band_type = OVS_METER_BAND_TYPE_DROP;
            break;
        default:
            band_type = OVS_METER_BAND_TYPE_UNSPEC;
        }
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type);
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate);
        /* Without OFPMF13_BURST, the burst defaults to the band's rate. */
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST,
                       config->flags & OFPMF13_BURST ?
                       band->burst_size : band->rate);
        nl_msg_end_nested(&buf, band_offset);
    }
    nl_msg_end_nested(&buf, bands_offset);

    int error = dpif_netlink_meter_transact(&buf, &msg,
                                            ovs_meter_set_response_policy, a,
                                    ARRAY_SIZE(ovs_meter_set_response_policy));
    if (error) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
        return error;
    }

    /* A mismatched id indicates the broken-kernel bug (see
     * probe_broken_meters()); log it but still report success. */
    if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "Kernel returned a different meter id than requested");
    }
    ofpbuf_delete(msg);
    return 0;
}
3719
3720 static int
3721 dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id,
3722 struct ofputil_meter_config *config)
3723 {
3724 if (probe_broken_meters(dpif_)) {
3725 return ENOMEM;
3726 }
3727
3728 return dpif_netlink_meter_set__(dpif_, meter_id, config);
3729 }
3730
3731 /* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
3732 * stored in 'stats', if it is not null. If 'command' is
3733 * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
3734 * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
3735 * simply retrieved. */
3736 static int
3737 dpif_netlink_meter_get_stats(const struct dpif *dpif_,
3738 ofproto_meter_id meter_id,
3739 struct ofputil_meter_stats *stats,
3740 uint16_t max_bands,
3741 enum ovs_meter_cmd command)
3742 {
3743 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3744 struct ofpbuf buf, *msg;
3745 uint64_t stub[1024 / 8];
3746
3747 static const struct nl_policy ovs_meter_stats_policy[] = {
3748 [OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true},
3749 [OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
3750 .optional = true},
3751 [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
3752 };
3753 struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)];
3754
3755 dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command);
3756
3757 nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
3758
3759 int error = dpif_netlink_meter_transact(&buf, &msg,
3760 ovs_meter_stats_policy, a,
3761 ARRAY_SIZE(ovs_meter_stats_policy));
3762 if (error) {
3763 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3764 VLOG_INFO_RL(&rl, "dpif_netlink_meter_transact %s failed",
3765 command == OVS_METER_CMD_GET ? "get" : "del");
3766 return error;
3767 }
3768
3769 if (stats
3770 && a[OVS_METER_ATTR_ID]
3771 && a[OVS_METER_ATTR_STATS]
3772 && nl_attr_get_u32(a[OVS_METER_ATTR_ID]) == meter_id.uint32) {
3773 /* return stats */
3774 const struct ovs_flow_stats *stat;
3775 const struct nlattr *nla;
3776 size_t left;
3777
3778 stat = nl_attr_get(a[OVS_METER_ATTR_STATS]);
3779 stats->packet_in_count = get_32aligned_u64(&stat->n_packets);
3780 stats->byte_in_count = get_32aligned_u64(&stat->n_bytes);
3781
3782 if (a[OVS_METER_ATTR_BANDS]) {
3783 size_t n_bands = 0;
3784 NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
3785 const struct nlattr *band_nla;
3786 band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS);
3787 if (band_nla && nl_attr_get_size(band_nla) \
3788 == sizeof(struct ovs_flow_stats)) {
3789 stat = nl_attr_get(band_nla);
3790
3791 if (n_bands < max_bands) {
3792 stats->bands[n_bands].packet_count
3793 = get_32aligned_u64(&stat->n_packets);
3794 stats->bands[n_bands].byte_count
3795 = get_32aligned_u64(&stat->n_bytes);
3796 ++n_bands;
3797 }
3798 } else {
3799 stats->bands[n_bands].packet_count = 0;
3800 stats->bands[n_bands].byte_count = 0;
3801 ++n_bands;
3802 }
3803 }
3804 stats->n_bands = n_bands;
3805 } else {
3806 /* For a non-existent meter, return 0 stats. */
3807 stats->n_bands = 0;
3808 }
3809 }
3810
3811 ofpbuf_delete(msg);
3812 return error;
3813 }
3814
3815 static int
3816 dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
3817 struct ofputil_meter_stats *stats, uint16_t max_bands)
3818 {
3819 return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
3820 OVS_METER_CMD_GET);
3821 }
3822
3823 static int
3824 dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
3825 struct ofputil_meter_stats *stats, uint16_t max_bands)
3826 {
3827 return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
3828 OVS_METER_CMD_DEL);
3829 }
3830
3831 static bool
3832 probe_broken_meters__(struct dpif *dpif)
3833 {
3834 /* This test is destructive if a probe occurs while ovs-vswitchd is
3835 * running (e.g., an ovs-dpctl meter command is called), so choose a
3836 * random high meter id to make this less likely to occur. */
3837 ofproto_meter_id id1 = { 54545401 };
3838 ofproto_meter_id id2 = { 54545402 };
3839 struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
3840 struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
3841 struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};
3842
3843 /* Try adding two meters and make sure that they both come back with
3844 * the proper meter id. Use the "__" version so that we don't cause
3845 * a recurve deadlock. */
3846 dpif_netlink_meter_set__(dpif, id1, &config1);
3847 dpif_netlink_meter_set__(dpif, id2, &config2);
3848
3849 if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
3850 || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
3851 VLOG_INFO("The kernel module has a broken meter implementation.");
3852 return true;
3853 }
3854
3855 dpif_netlink_meter_del(dpif, id1, NULL, 0);
3856 dpif_netlink_meter_del(dpif, id2, NULL, 0);
3857
3858 return false;
3859 }
3860
/* Returns true if the kernel's meter implementation is broken (see the
 * comment on probe_broken_meters above).  The probe runs at most once per
 * process and the result is cached. */
static bool
probe_broken_meters(struct dpif *dpif)
{
    /* This is a once-only test because currently OVS only has at most a single
     * Netlink capable datapath on any given platform. */
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* Written only inside the once-block; read by every later caller. */
    static bool broken_meters = false;
    if (ovsthread_once_start(&once)) {
        broken_meters = probe_broken_meters__(dpif);
        ovsthread_once_done(&once);
    }
    return broken_meters;
}
3875 \f
/* The "system" dpif class: dispatch table wiring the generic dpif provider
 * interface to the kernel-datapath implementations in this file.  Entries
 * that are NULL are features this datapath does not implement. */
const struct dpif_class dpif_netlink_class = {
    "system",
    false,                      /* cleanup_required */
    NULL,                       /* init */
    dpif_netlink_enumerate,
    NULL,
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                       /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                       /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    NULL,                       /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                       /* register_dp_purge_cb */
    NULL,                       /* register_upcall_cb */
    NULL,                       /* enable_upcall */
    NULL,                       /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    dpif_netlink_ct_flush,
    NULL,                       /* ct_set_maxconns */
    NULL,                       /* ct_get_maxconns */
    NULL,                       /* ct_get_nconns */
    NULL,                       /* ct_set_tcp_seq_chk */
    NULL,                       /* ct_get_tcp_seq_chk */
    dpif_netlink_ct_set_limits,
    dpif_netlink_ct_get_limits,
    dpif_netlink_ct_del_limits,
    dpif_netlink_ct_set_timeout_policy,
    dpif_netlink_ct_get_timeout_policy,
    dpif_netlink_ct_del_timeout_policy,
    dpif_netlink_ct_timeout_policy_dump_start,
    dpif_netlink_ct_timeout_policy_dump_next,
    dpif_netlink_ct_timeout_policy_dump_done,
    dpif_netlink_ct_get_timeout_policy_name,
    NULL,                       /* ipf_set_enabled */
    NULL,                       /* ipf_set_min_frag */
    NULL,                       /* ipf_set_max_nfrags */
    NULL,                       /* ipf_get_status */
    NULL,                       /* ipf_dump_start */
    NULL,                       /* ipf_dump_next */
    NULL,                       /* ipf_dump_done */
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
};
3949
3950 static int
3951 dpif_netlink_init(void)
3952 {
3953 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3954 static int error;
3955
3956 if (ovsthread_once_start(&once)) {
3957 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
3958 &ovs_datapath_family);
3959 if (error) {
3960 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3961 "The Open vSwitch kernel module is probably not loaded.",
3962 OVS_DATAPATH_FAMILY);
3963 }
3964 if (!error) {
3965 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
3966 }
3967 if (!error) {
3968 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
3969 }
3970 if (!error) {
3971 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
3972 &ovs_packet_family);
3973 }
3974 if (!error) {
3975 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
3976 &ovs_vport_mcgroup);
3977 }
3978 if (!error) {
3979 if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) {
3980 VLOG_INFO("The kernel module does not support meters.");
3981 }
3982 }
3983 if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY,
3984 &ovs_ct_limit_family) < 0) {
3985 VLOG_INFO("Generic Netlink family '%s' does not exist. "
3986 "Please update the Open vSwitch kernel module to enable "
3987 "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY);
3988 }
3989
3990 ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();
3991
3992 ovsthread_once_done(&once);
3993 }
3994
3995 return error;
3996 }
3997
/* Returns true if the kernel vport named 'name' exists and is an internal
 * device, false otherwise (including lookup failure). */
bool
dpif_netlink_is_internal_device(const char *name)
{
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    /* On failure, dpif_netlink_vport_get() (via vport_transact) zeroes
     * 'reply', so reading reply.type below is safe and yields
     * OVS_VPORT_TYPE_UNSPEC rather than garbage. */
    error = dpif_netlink_vport_get(name, &reply, &buf);
    if (!error) {
        ofpbuf_delete(buf);
    } else if (error != ENODEV && error != ENOENT) {
        /* ENODEV/ENOENT just mean "no such device": not worth logging. */
        VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
                     name, ovs_strerror(error));
    }

    return reply.type == OVS_VPORT_TYPE_INTERNAL;
}
4015
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'vport'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'vport' will contain pointers into 'buf', so the caller should not free
 * 'buf' while 'vport' is still in use. */
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NL_A_U32, .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Iterate over a read-only view so 'buf' itself is not consumed. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* The attribute is an array of PIDs; derive the count from its
         * total payload size. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);

    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    if (a[OVS_VPORT_ATTR_NETNSID]) {
        netnsid_set(&vport->netnsid,
                    nl_attr_get_u32(a[OVS_VPORT_ATTR_NETNSID]));
    } else {
        /* No NETNSID attribute means the vport is in the local netns. */
        netnsid_set_local(&vport->netnsid);
    }
    return 0;
}
4078
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'vport'.  Only fields of
 * 'vport' that are set (non-default) are serialized. */
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    /* ODPP_NONE is the "unset" sentinel from dpif_netlink_vport_init(). */
    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        /* Serialized as a flat array of n_upcall_pids PIDs. */
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
4121
/* Clears 'vport' to "empty" values: all fields zeroed except 'port_no',
 * whose "unset" sentinel is ODPP_NONE (zero is a valid port number). */
void
dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
{
    memset(vport, 0, sizeof *vport);
    vport->port_no = ODPP_NONE;
}
4129
/* Executes 'request' in the kernel datapath.  If the command fails, returns a
 * positive errno value.  Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else.  If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be an ovs_vport also, which is decoded
 * and stored in '*reply' and '*bufp'.  The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp').
 *
 * On any failure, '*reply' is reinitialized to empty values and '*bufp' is
 * set to NULL, so the caller may read them safely regardless of outcome. */
int
dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
                            struct dpif_netlink_vport *reply,
                            struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    /* Make sure the genl families are resolved before transacting. */
    error = dpif_netlink_init();
    if (error) {
        if (reply) {
            *bufp = NULL;
            dpif_netlink_vport_init(reply);
        }
        return error;
    }

    request_buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        if (!error) {
            error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            /* Leave 'reply' empty and release the reply buffer so the
             * caller never sees a partially parsed result. */
            dpif_netlink_vport_init(reply);
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
4172
4173 /* Obtains information about the kernel vport named 'name' and stores it into
4174 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
4175 * longer needed ('reply' will contain pointers into '*bufp'). */
4176 int
4177 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
4178 struct ofpbuf **bufp)
4179 {
4180 struct dpif_netlink_vport request;
4181
4182 dpif_netlink_vport_init(&request);
4183 request.cmd = OVS_VPORT_CMD_GET;
4184 request.name = name;
4185
4186 return dpif_netlink_vport_transact(&request, reply, bufp);
4187 }
4188
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'dp'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'dp' is still in use. */
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                        NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                        .optional = true },
    };

    dpif_netlink_dp_init(dp);

    /* Iterate over a read-only view so 'buf' itself is not consumed. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    return 0;
}
4235
4236 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
4237 static void
4238 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
4239 {
4240 struct ovs_header *ovs_header;
4241
4242 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
4243 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
4244 OVS_DATAPATH_VERSION);
4245
4246 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4247 ovs_header->dp_ifindex = dp->dp_ifindex;
4248
4249 if (dp->name) {
4250 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
4251 }
4252
4253 if (dp->upcall_pid) {
4254 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
4255 }
4256
4257 if (dp->user_features) {
4258 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
4259 }
4260
4261 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
4262 }
4263
/* Clears 'dp' to "empty" values: all-zero bytes, so every pointer member is
 * null and the command/ifindex are 0.  Callers fill in only the fields that
 * a particular request needs. */
static void
dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
{
    memset(dp, 0, sizeof *dp);
}
4270
4271 static void
4272 dpif_netlink_dp_dump_start(struct nl_dump *dump)
4273 {
4274 struct dpif_netlink_dp request;
4275 struct ofpbuf *buf;
4276
4277 dpif_netlink_dp_init(&request);
4278 request.cmd = OVS_DP_CMD_GET;
4279
4280 buf = ofpbuf_new(1024);
4281 dpif_netlink_dp_to_ofpbuf(&request, buf);
4282 nl_dump_start(dump, NETLINK_GENERIC, buf);
4283 ofpbuf_delete(buf);
4284 }
4285
4286 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4287 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4288 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4289 * result of the command is expected to be of the same form, which is decoded
4290 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
4291 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
4292 static int
4293 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
4294 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
4295 {
4296 struct ofpbuf *request_buf;
4297 int error;
4298
4299 ovs_assert((reply != NULL) == (bufp != NULL));
4300
4301 request_buf = ofpbuf_new(1024);
4302 dpif_netlink_dp_to_ofpbuf(request, request_buf);
4303 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4304 ofpbuf_delete(request_buf);
4305
4306 if (reply) {
4307 dpif_netlink_dp_init(reply);
4308 if (!error) {
4309 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
4310 }
4311 if (error) {
4312 ofpbuf_delete(*bufp);
4313 *bufp = NULL;
4314 }
4315 }
4316 return error;
4317 }
4318
4319 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
4320 * The caller must free '*bufp' when the reply is no longer needed ('reply'
4321 * will contain pointers into '*bufp'). */
4322 static int
4323 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
4324 struct ofpbuf **bufp)
4325 {
4326 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
4327 struct dpif_netlink_dp request;
4328
4329 dpif_netlink_dp_init(&request);
4330 request.cmd = OVS_DP_CMD_GET;
4331 request.dp_ifindex = dpif->dp_ifindex;
4332
4333 return dpif_netlink_dp_transact(&request, reply, bufp);
4334 }
4335
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'flow'.  Returns 0 if successful, otherwise a
 * positive errno value (EINVAL for any malformed or foreign message).
 *
 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'flow' is still in use. */
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    dpif_netlink_flow_init(flow);

    /* Strip the fixed headers (netlink, genetlink, OVS datapath); any of
     * these pulls returns NULL on a truncated message. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A usable flow must be identifiable by at least one of key or UFID. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
4408
4409
/*
 * Copies the serialized key/mask attributes in 'data' into a nested attribute
 * of type 'type' in 'buf', filtering out any OVS_KEY_ATTR_PACKET_TYPE
 * attribute (the kernel datapath does not accept it).  If the flow is not
 * Ethernet (no OVS_KEY_ATTR_ETHERNET present), the packet type is instead
 * folded into an OVS_KEY_ATTR_ETHERTYPE attribute.  When 'data' has no
 * PACKET_TYPE attribute at all, it is copied verbatim.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* 'data' splits into [0, first_chunk_size) before PACKET_TYPE and
         * 'second_chunk_size' bytes after it; both chunks are copied,
         * dropping PACKET_TYPE itself. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
                                   - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: convert the packet type's namespace type
             * into an ETHERTYPE attribute.  If one was already copied into
             * 'buf', overwrite its payload in place; otherwise append it. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
4455
4456 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
4457 * followed by Netlink attributes corresponding to 'flow'. */
4458 static void
4459 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
4460 struct ofpbuf *buf)
4461 {
4462 struct ovs_header *ovs_header;
4463
4464 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
4465 NLM_F_REQUEST | flow->nlmsg_flags,
4466 flow->cmd, OVS_FLOW_VERSION);
4467
4468 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
4469 ovs_header->dp_ifindex = flow->dp_ifindex;
4470
4471 if (flow->ufid_present) {
4472 nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid);
4473 }
4474 if (flow->ufid_terse) {
4475 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
4476 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
4477 | OVS_UFID_F_OMIT_ACTIONS);
4478 }
4479 if (!flow->ufid_terse || !flow->ufid_present) {
4480 if (flow->key_len) {
4481 put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key,
4482 flow->key_len);
4483 }
4484 if (flow->mask_len) {
4485 put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask,
4486 flow->mask_len);
4487 }
4488 if (flow->actions || flow->actions_len) {
4489 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
4490 flow->actions, flow->actions_len);
4491 }
4492 }
4493
4494 /* We never need to send these to the kernel. */
4495 ovs_assert(!flow->stats);
4496 ovs_assert(!flow->tcp_flags);
4497 ovs_assert(!flow->used);
4498
4499 if (flow->clear) {
4500 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
4501 }
4502 if (flow->probe) {
4503 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
4504 }
4505 }
4506
/* Clears 'flow' to "empty" values: all-zero bytes, so every attribute
 * pointer is null, all lengths are 0, and all flags are false. */
static void
dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
{
    memset(flow, 0, sizeof *flow);
}
4513
4514 /* Executes 'request' in the kernel datapath. If the command fails, returns a
4515 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
4516 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
4517 * result of the command is expected to be a flow also, which is decoded and
4518 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
4519 * is no longer needed ('reply' will contain pointers into '*bufp'). */
4520 static int
4521 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
4522 struct dpif_netlink_flow *reply,
4523 struct ofpbuf **bufp)
4524 {
4525 struct ofpbuf *request_buf;
4526 int error;
4527
4528 ovs_assert((reply != NULL) == (bufp != NULL));
4529
4530 if (reply) {
4531 request->nlmsg_flags |= NLM_F_ECHO;
4532 }
4533
4534 request_buf = ofpbuf_new(1024);
4535 dpif_netlink_flow_to_ofpbuf(request, request_buf);
4536 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
4537 ofpbuf_delete(request_buf);
4538
4539 if (reply) {
4540 if (!error) {
4541 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
4542 }
4543 if (error) {
4544 dpif_netlink_flow_init(reply);
4545 ofpbuf_delete(*bufp);
4546 *bufp = NULL;
4547 }
4548 }
4549 return error;
4550 }
4551
4552 static void
4553 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
4554 struct dpif_flow_stats *stats)
4555 {
4556 if (flow->stats) {
4557 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
4558 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
4559 } else {
4560 stats->n_packets = 0;
4561 stats->n_bytes = 0;
4562 }
4563 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
4564 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
4565 }
4566
4567 /* Logs information about a packet that was recently lost in 'ch' (in
4568 * 'dpif_'). */
4569 static void
4570 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
4571 uint32_t handler_id)
4572 {
4573 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
4574 struct ds s;
4575
4576 if (VLOG_DROP_WARN(&rl)) {
4577 return;
4578 }
4579
4580 ds_init(&s);
4581 if (ch->last_poll != LLONG_MIN) {
4582 ds_put_format(&s, " (last polled %lld ms ago)",
4583 time_msec() - ch->last_poll);
4584 }
4585
4586 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
4587 dpif_name(&dpif->dpif), ch_idx, handler_id);
4588 ds_destroy(&s);
4589 }