]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif-netlink.c
dpif: Refactor obj type from void pointer to dpif_class
[mirror_ovs.git] / lib / dpif-netlink.c
1 /*
2 * Copyright (c) 2008-2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dpif-netlink.h"
20
21 #include <ctype.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <net/if.h>
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
28 #include <poll.h>
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <sys/epoll.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34
35 #include "bitmap.h"
36 #include "dpif-provider.h"
37 #include "dpif-netlink-rtnl.h"
38 #include "openvswitch/dynamic-string.h"
39 #include "flow.h"
40 #include "fat-rwlock.h"
41 #include "netdev.h"
42 #include "netdev-provider.h"
43 #include "netdev-linux.h"
44 #include "netdev-vport.h"
45 #include "netlink-conntrack.h"
46 #include "netlink-notifier.h"
47 #include "netlink-socket.h"
48 #include "netlink.h"
49 #include "odp-util.h"
50 #include "openvswitch/ofpbuf.h"
51 #include "packets.h"
52 #include "poll-loop.h"
53 #include "random.h"
54 #include "openvswitch/shash.h"
55 #include "sset.h"
56 #include "timeval.h"
57 #include "unaligned.h"
58 #include "util.h"
59 #include "openvswitch/vlog.h"
60 #include "openvswitch/flow.h"
61
62 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
63 #ifdef _WIN32
64 #include "wmi.h"
65 enum { WINDOWS = 1 };
66 #else
67 enum { WINDOWS = 0 };
68 #endif
69 enum { MAX_PORTS = USHRT_MAX };
70
71 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
72 * missing if we have old headers. */
73 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
74
75 #define FLOW_DUMP_MAX_BATCH 50
76 #define OPERATE_MAX_OPS 50
77
/* Parsed form of an OVS datapath Generic Netlink message (OVS_DP_* family).
 * Used both to compose requests and to hold attributes decoded from replies;
 * see dpif_netlink_dp_transact() and dpif_netlink_dp_from_ofpbuf(). */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
};
93
94 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
95 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
96 const struct ofpbuf *);
97 static void dpif_netlink_dp_dump_start(struct nl_dump *);
98 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
99 struct dpif_netlink_dp *reply,
100 struct ofpbuf **bufp);
101 static int dpif_netlink_dp_get(const struct dpif *,
102 struct dpif_netlink_dp *reply,
103 struct ofpbuf **bufp);
104
/* Parsed form of an OVS flow Generic Netlink message (OVS_FLOW_* family).
 * Used both to compose requests and to hold attributes decoded from replies;
 * see dpif_netlink_flow_transact() and dpif_netlink_flow_from_ofpbuf(). */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
136
137 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
138 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
139 const struct ofpbuf *);
140 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
141 struct ofpbuf *);
142 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
143 struct dpif_netlink_flow *reply,
144 struct ofpbuf **bufp);
145 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
146 struct dpif_flow_stats *);
147 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
148 const struct dpif_netlink_flow *);
149
/* One of the dpif channels between the kernel and userspace. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket. */
    long long int last_poll;    /* Last time this channel was polled;
                                 * LLONG_MIN if never polled (see
                                 * vport_add_channels()). */
};
155
#ifdef _WIN32
#define VPORT_SOCK_POOL_SIZE 1
/* On Windows, there is no native support for epoll.  There are equivalent
 * interfaces though, that are not used currently.  For simplicity, a pool of
 * netlink sockets is used.  Each socket is represented by 'struct
 * dpif_windows_vport_sock'.  Since it is a pool, multiple OVS ports may be
 * sharing the same socket.  In the future, we can add a reference count and
 * such fields. */
struct dpif_windows_vport_sock {
    struct nl_sock *nl_sock;    /* netlink socket. */
};
#endif
168
/* Per-upcall-handler state: the upcall channels this handler polls and the
 * epoll machinery used to poll them. */
struct dpif_handler {
    struct dpif_channel *channels;/* Array of channels, indexed by port. */
    struct epoll_event *epoll_events;
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
183
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_netlink {
    struct dpif dpif;
    int dp_ifindex;                 /* Kernel datapath ifindex. */

    /* Upcall messages.  The handler/channel state below is protected by
     * 'upcall_lock' (see the OVS_REQ_*LOCK annotations on the helpers). */
    struct fat_rwlock upcall_lock;
    struct dpif_handler *handlers;
    uint32_t n_handlers;           /* Num of upcall handlers. */
    int uc_array_size;             /* Size of 'handler->channels' and */
                                   /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier; /* vport multicast group subscriber. */
    bool refresh_channels;         /* Rebuild channels on next run()? */
};
200
201 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
202 uint32_t ch_idx, uint32_t handler_id);
203
204 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
205
206 /* Generic Netlink family numbers for OVS.
207 *
208 * Initialized by dpif_netlink_init(). */
209 static int ovs_datapath_family;
210 static int ovs_vport_family;
211 static int ovs_flow_family;
212 static int ovs_packet_family;
213
214 /* Generic Netlink multicast groups for OVS.
215 *
216 * Initialized by dpif_netlink_init(). */
217 static unsigned int ovs_vport_mcgroup;
218
219 /* If true, tunnel devices are created using OVS compat/genetlink.
220 * If false, tunnel devices are created with rtnetlink and using light weight
221 * tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
222 * to using the compat interface. */
223 static bool ovs_tunnels_out_of_tree = true;
224
225 static int dpif_netlink_init(void);
226 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
227 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
228 odp_port_t port_no, uint32_t hash);
229 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
230 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
231 uint32_t n_handlers);
232 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
233 struct ofpbuf *);
234 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
235 const struct ofpbuf *);
236 static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
237 odp_port_t port_no, const char *port_name,
238 struct dpif_port *dpif_port);
239
/* Returns the 'struct dpif_netlink' that embeds 'dpif', after asserting
 * that 'dpif' really belongs to the dpif-netlink class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
246
247 static int
248 dpif_netlink_enumerate(struct sset *all_dps,
249 const struct dpif_class *dpif_class OVS_UNUSED)
250 {
251 struct nl_dump dump;
252 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
253 struct ofpbuf msg, buf;
254 int error;
255
256 error = dpif_netlink_init();
257 if (error) {
258 return error;
259 }
260
261 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
262 dpif_netlink_dp_dump_start(&dump);
263 while (nl_dump_next(&dump, &msg, &buf)) {
264 struct dpif_netlink_dp dp;
265
266 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
267 sset_add(all_dps, dp.name);
268 }
269 }
270 ofpbuf_uninit(&buf);
271 return nl_dump_done(&dump);
272 }
273
274 static int
275 dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
276 bool create, struct dpif **dpifp)
277 {
278 struct dpif_netlink_dp dp_request, dp;
279 struct ofpbuf *buf;
280 uint32_t upcall_pid;
281 int error;
282
283 error = dpif_netlink_init();
284 if (error) {
285 return error;
286 }
287
288 /* Create or look up datapath. */
289 dpif_netlink_dp_init(&dp_request);
290 if (create) {
291 dp_request.cmd = OVS_DP_CMD_NEW;
292 upcall_pid = 0;
293 dp_request.upcall_pid = &upcall_pid;
294 } else {
295 /* Use OVS_DP_CMD_SET to report user features */
296 dp_request.cmd = OVS_DP_CMD_SET;
297 }
298 dp_request.name = name;
299 dp_request.user_features |= OVS_DP_F_UNALIGNED;
300 dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
301 error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
302 if (error) {
303 return error;
304 }
305
306 error = open_dpif(&dp, dpifp);
307 ofpbuf_delete(buf);
308 return error;
309 }
310
311 static int
312 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
313 {
314 struct dpif_netlink *dpif;
315
316 dpif = xzalloc(sizeof *dpif);
317 dpif->port_notifier = NULL;
318 fat_rwlock_init(&dpif->upcall_lock);
319
320 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
321 dp->dp_ifindex, dp->dp_ifindex);
322
323 dpif->dp_ifindex = dp->dp_ifindex;
324 *dpifp = &dpif->dpif;
325
326 return 0;
327 }
328
/* Destroys each of the 'n_socks' Netlink sockets in 'socksp', then frees
 * the 'socksp' array itself.  Entries may be null (relied upon by the
 * error path in vport_create_socksp__()). */
static void
vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
{
    for (size_t idx = 0; idx < n_socks; idx++) {
        nl_sock_destroy(socksp[idx]);
    }
    free(socksp);
}
342
343 /* Creates an array of netlink sockets. Returns an array of the
344 * corresponding pointers. Records the error in 'error'. */
345 static struct nl_sock **
346 vport_create_socksp__(uint32_t n_socks, int *error)
347 {
348 struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
349 size_t i;
350
351 for (i = 0; i < n_socks; i++) {
352 *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
353 if (*error) {
354 goto error;
355 }
356 }
357
358 return socksp;
359
360 error:
361 vport_del_socksp__(socksp, n_socks);
362
363 return NULL;
364 }
365
366 #ifdef _WIN32
/* Tears down 'handler''s Windows socket pool: unsubscribes every socket
 * from packet delivery, destroys it, and frees the pool array.  A no-op if
 * no pool was allocated. */
static void
vport_delete_sock_pool(struct dpif_handler *handler)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    if (handler->vport_sock_pool) {
        uint32_t i;
        struct dpif_windows_vport_sock *sock_pool =
            handler->vport_sock_pool;

        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
            if (sock_pool[i].nl_sock) {
                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
                nl_sock_destroy(sock_pool[i].nl_sock);
                sock_pool[i].nl_sock = NULL;
            }
        }

        free(handler->vport_sock_pool);
        handler->vport_sock_pool = NULL;
    }
}
388
389 static int
390 vport_create_sock_pool(struct dpif_handler *handler)
391 OVS_REQ_WRLOCK(dpif->upcall_lock)
392 {
393 struct dpif_windows_vport_sock *sock_pool;
394 size_t i;
395 int error = 0;
396
397 sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
398 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
399 error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
400 if (error) {
401 goto error;
402 }
403
404 /* Enable the netlink socket to receive packets. This is equivalent to
405 * calling nl_sock_join_mcgroup() to receive events. */
406 error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
407 if (error) {
408 goto error;
409 }
410 }
411
412 handler->vport_sock_pool = sock_pool;
413 handler->last_used_pool_idx = 0;
414 return 0;
415
416 error:
417 vport_delete_sock_pool(handler);
418 return error;
419 }
420
421 /* Returns an array pointers to netlink sockets. The sockets are picked from a
422 * pool. Records the error in 'error'. */
423 static struct nl_sock **
424 vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
425 OVS_REQ_WRLOCK(dpif->upcall_lock)
426 {
427 uint32_t n_socks = dpif->n_handlers;
428 struct nl_sock **socksp;
429 size_t i;
430
431 ovs_assert(n_socks <= 1);
432 socksp = xzalloc(n_socks * sizeof *socksp);
433
434 /* Pick netlink sockets to use in a round-robin fashion from each
435 * handler's pool of sockets. */
436 for (i = 0; i < n_socks; i++) {
437 struct dpif_handler *handler = &dpif->handlers[i];
438 struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
439 size_t index = handler->last_used_pool_idx;
440
441 /* A pool of sockets is allocated when the handler is initialized. */
442 if (sock_pool == NULL) {
443 free(socksp);
444 *error = EINVAL;
445 return NULL;
446 }
447
448 ovs_assert(index < VPORT_SOCK_POOL_SIZE);
449 socksp[i] = sock_pool[index].nl_sock;
450 socksp[i] = sock_pool[index].nl_sock;
451 ovs_assert(socksp[i]);
452 index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
453 handler->last_used_pool_idx = index;
454 }
455
456 return socksp;
457 }
458
/* Releases a socket array obtained from vport_create_socksp_windows().
 * Frees only the array: the sockets themselves belong to the per-handler
 * pool and are destroyed by vport_delete_sock_pool(). */
static void
vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
{
    free(socksp);
}
464 #endif /* _WIN32 */
465
/* Creates (or, on Windows, borrows from the per-handler pools) one Netlink
 * socket per handler.  Returns the array of sockets, or NULL on failure
 * with '*error' set to a positive errno value. */
static struct nl_sock **
vport_create_socksp(struct dpif_netlink *dpif, int *error)
{
#ifdef _WIN32
    return vport_create_socksp_windows(dpif, error);
#else
    return vport_create_socksp__(dpif->n_handlers, error);
#endif
}
475
/* Releases a socket array obtained from vport_create_socksp().  On Linux
 * this destroys the sockets too; on Windows only the array is freed
 * because the sockets belong to the per-handler pools. */
static void
vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
{
#ifdef _WIN32
    vport_del_socksp_windows(dpif, socksp);
#else
    vport_del_socksp__(socksp, dpif->n_handlers);
#endif
}
485
486 /* Given the array of pointers to netlink sockets 'socksp', returns
487 * the array of corresponding pids. If the 'socksp' is NULL, returns
488 * a single-element array of value 0. */
489 static uint32_t *
490 vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
491 {
492 uint32_t *pids;
493
494 if (!socksp) {
495 pids = xzalloc(sizeof *pids);
496 } else {
497 size_t i;
498
499 pids = xzalloc(n_socks * sizeof *pids);
500 for (i = 0; i < n_socks; i++) {
501 pids[i] = nl_sock_pid(socksp[i]);
502 }
503 }
504
505 return pids;
506 }
507
508 /* Given the port number 'port_idx', extracts the pids of netlink sockets
509 * associated to the port and assigns it to 'upcall_pids'. */
510 static bool
511 vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
512 uint32_t **upcall_pids)
513 {
514 uint32_t *pids;
515 size_t i;
516
517 /* Since the nl_sock can only be assigned in either all
518 * or none "dpif->handlers" channels, the following check
519 * would suffice. */
520 if (!dpif->handlers[0].channels[port_idx].sock) {
521 return false;
522 }
523 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
524
525 pids = xzalloc(dpif->n_handlers * sizeof *pids);
526
527 for (i = 0; i < dpif->n_handlers; i++) {
528 pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
529 }
530
531 *upcall_pids = pids;
532
533 return true;
534 }
535
/* Registers the per-handler sockets in 'socksp' as the upcall channels for
 * port 'port_no': grows every handler's 'channels' and 'epoll_events'
 * arrays as needed and adds each socket to its handler's epoll set (except
 * on Windows, which has no epoll).  On epoll failure, unwinds the channels
 * already registered and returns a positive errno value; the sockets then
 * remain owned by the caller.  Returns 0 on success or if upcalls are
 * disabled ('dpif->handlers' is null). */
static int
vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
                   struct nl_sock **socksp)
{
    struct epoll_event event;
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i, j;
    int error;

    if (dpif->handlers == NULL) {
        return 0;
    }

    /* We assume that the datapath densely chooses port numbers, which can
     * therefore be used as an index into 'channels' and 'epoll_events' of
     * 'dpif->handler'. */
    if (port_idx >= dpif->uc_array_size) {
        uint32_t new_size = port_idx + 1;

        if (new_size > MAX_PORTS) {
            VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
                         dpif_name(&dpif->dpif), port_no);
            return EFBIG;
        }

        for (i = 0; i < dpif->n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->channels = xrealloc(handler->channels,
                                         new_size * sizeof *handler->channels);

            /* Null out the newly added slots so that unused ports are
             * recognizably channel-free. */
            for (j = dpif->uc_array_size; j < new_size; j++) {
                handler->channels[j].sock = NULL;
            }

            handler->epoll_events = xrealloc(handler->epoll_events,
                new_size * sizeof *handler->epoll_events);

        }
        dpif->uc_array_size = new_size;
    }

    /* Tag the epoll event with the port index so the handler can find the
     * channel that became readable. */
    memset(&event, 0, sizeof event);
    event.events = EPOLLIN;
    event.data.u32 = port_idx;

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

#ifndef _WIN32
        if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
                      &event) < 0) {
            error = errno;
            goto error;
        }
#endif
        dpif->handlers[i].channels[port_idx].sock = socksp[i];
        dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
    }

    return 0;

error:
    /* Undo the registrations made before handler 'i' failed. */
    for (j = 0; j < i; j++) {
#ifndef _WIN32
        epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(socksp[j]), NULL);
#endif
        dpif->handlers[j].channels[port_idx].sock = NULL;
    }

    return error;
}
609
/* Removes port 'port_no''s channel from every handler: detaches the socket
 * from the handler's epoll set and destroys it (except on Windows, where
 * the socket belongs to the handler's pool), then discards the handler's
 * pending epoll events so stale events for this port are not processed.
 * A no-op if the port has no channels. */
static void
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
{
    uint32_t port_idx = odp_to_u32(port_no);
    size_t i;

    if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
        return;
    }

    /* Since the sock can only be assigned in either all or none
     * of "dpif->handlers" channels, the following check would
     * suffice. */
    if (!dpif->handlers[0].channels[port_idx].sock) {
        return;
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
        epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
                  nl_sock_fd(handler->channels[port_idx].sock), NULL);
        nl_sock_destroy(handler->channels[port_idx].sock);
#endif
        handler->channels[port_idx].sock = NULL;
        handler->event_offset = handler->n_events = 0;
    }
}
638
/* Tears down all upcall state: asks the kernel to stop sending upcalls for
 * every port (by setting its upcall pid to 0), deletes every per-port
 * channel, uninitializes each handler, and frees the handler array.
 * A no-op if upcalls were never enabled. */
static void
destroy_all_channels(struct dpif_netlink *dpif)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned int i;

    if (!dpif->handlers) {
        return;
    }

    for (i = 0; i < dpif->uc_array_size; i++ ) {
        struct dpif_netlink_vport vport_request;
        uint32_t upcall_pids = 0;

        /* Since the sock can only be assigned in either all or none
         * of "dpif->handlers" channels, the following check would
         * suffice. */
        if (!dpif->handlers[0].channels[i].sock) {
            continue;
        }

        /* Turn off upcalls. */
        dpif_netlink_vport_init(&vport_request);
        vport_request.cmd = OVS_VPORT_CMD_SET;
        vport_request.dp_ifindex = dpif->dp_ifindex;
        vport_request.port_no = u32_to_odp(i);
        vport_request.n_upcall_pids = 1;
        vport_request.upcall_pids = &upcall_pids;
        dpif_netlink_vport_transact(&vport_request, NULL, NULL);

        vport_del_channels(dpif, u32_to_odp(i));
    }

    for (i = 0; i < dpif->n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        dpif_netlink_handler_uninit(handler);
        free(handler->epoll_events);
        free(handler->channels);
    }

    free(dpif->handlers);
    dpif->handlers = NULL;
    dpif->n_handlers = 0;
    dpif->uc_array_size = 0;
}
685
/* dpif 'close' entry point: releases the port notifier, tears down all
 * upcall channels (under 'upcall_lock'), and frees 'dpif_' itself.  Does
 * not delete the kernel datapath (see dpif_netlink_destroy()). */
static void
dpif_netlink_close(struct dpif *dpif_)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    nl_sock_destroy(dpif->port_notifier);

    fat_rwlock_wrlock(&dpif->upcall_lock);
    destroy_all_channels(dpif);
    fat_rwlock_unlock(&dpif->upcall_lock);

    fat_rwlock_destroy(&dpif->upcall_lock);
    free(dpif);
}
700
701 static int
702 dpif_netlink_destroy(struct dpif *dpif_)
703 {
704 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
705 struct dpif_netlink_dp dp;
706
707 dpif_netlink_dp_init(&dp);
708 dp.cmd = OVS_DP_CMD_DEL;
709 dp.dp_ifindex = dpif->dp_ifindex;
710 return dpif_netlink_dp_transact(&dp, NULL, NULL);
711 }
712
713 static bool
714 dpif_netlink_run(struct dpif *dpif_)
715 {
716 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
717
718 if (dpif->refresh_channels) {
719 dpif->refresh_channels = false;
720 fat_rwlock_wrlock(&dpif->upcall_lock);
721 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
722 fat_rwlock_unlock(&dpif->upcall_lock);
723 }
724 return false;
725 }
726
727 static int
728 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
729 {
730 struct dpif_netlink_dp dp;
731 struct ofpbuf *buf;
732 int error;
733
734 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
735 if (!error) {
736 memset(stats, 0, sizeof *stats);
737
738 if (dp.stats) {
739 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
740 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
741 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
742 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
743 }
744
745 if (dp.megaflow_stats) {
746 stats->n_masks = dp.megaflow_stats->n_masks;
747 stats->n_mask_hit = get_32aligned_u64(
748 &dp.megaflow_stats->n_mask_hit);
749 } else {
750 stats->n_masks = UINT32_MAX;
751 stats->n_mask_hit = UINT64_MAX;
752 }
753 ofpbuf_delete(buf);
754 }
755 return error;
756 }
757
758 static const char *
759 get_vport_type(const struct dpif_netlink_vport *vport)
760 {
761 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
762
763 switch (vport->type) {
764 case OVS_VPORT_TYPE_NETDEV: {
765 const char *type = netdev_get_type_from_name(vport->name);
766
767 return type ? type : "system";
768 }
769
770 case OVS_VPORT_TYPE_INTERNAL:
771 return "internal";
772
773 case OVS_VPORT_TYPE_GENEVE:
774 return "geneve";
775
776 case OVS_VPORT_TYPE_GRE:
777 return "gre";
778
779 case OVS_VPORT_TYPE_VXLAN:
780 return "vxlan";
781
782 case OVS_VPORT_TYPE_LISP:
783 return "lisp";
784
785 case OVS_VPORT_TYPE_STT:
786 return "stt";
787
788 case OVS_VPORT_TYPE_UNSPEC:
789 case __OVS_VPORT_TYPE_MAX:
790 break;
791 }
792
793 VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
794 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
795 return "unknown";
796 }
797
798 enum ovs_vport_type
799 netdev_to_ovs_vport_type(const char *type)
800 {
801 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
802 return OVS_VPORT_TYPE_NETDEV;
803 } else if (!strcmp(type, "internal")) {
804 return OVS_VPORT_TYPE_INTERNAL;
805 } else if (strstr(type, "stt")) {
806 return OVS_VPORT_TYPE_STT;
807 } else if (!strcmp(type, "geneve")) {
808 return OVS_VPORT_TYPE_GENEVE;
809 } else if (strstr(type, "gre")) {
810 return OVS_VPORT_TYPE_GRE;
811 } else if (!strcmp(type, "vxlan")) {
812 return OVS_VPORT_TYPE_VXLAN;
813 } else if (!strcmp(type, "lisp")) {
814 return OVS_VPORT_TYPE_LISP;
815 } else {
816 return OVS_VPORT_TYPE_UNSPEC;
817 }
818 }
819
/* Adds a vport named 'name' of type 'type' (with optional serialized
 * 'options') to the kernel datapath.  If upcalls are enabled, creates one
 * upcall socket per handler, passes their pids to the kernel, and wires
 * them up as the new port's channels.  On success, stores the assigned
 * port number in '*port_nop' (which may also carry a requested number on
 * entry, or ODPP_NONE).  Returns 0 on success or a positive errno
 * value. */
static int
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                        enum ovs_vport_type type,
                        struct ofpbuf *options,
                        odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport request, reply;
    struct ofpbuf *buf;
    struct nl_sock **socksp = NULL;
    uint32_t *upcall_pids;
    int error = 0;

    if (dpif->handlers) {
        socksp = vport_create_socksp(dpif, &error);
        if (!socksp) {
            return error;
        }
    }

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_NEW;
    request.dp_ifindex = dpif->dp_ifindex;
    request.type = type;
    request.name = name;

    request.port_no = *port_nop;
    /* With no sockets this yields a single pid of 0, which tells the
     * kernel not to send upcalls for the port. */
    upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
    request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
    request.upcall_pids = upcall_pids;

    if (options) {
        request.options = options->data;
        request.options_len = options->size;
    }

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        *port_nop = reply.port_no;
    } else {
        if (error == EBUSY && *port_nop != ODPP_NONE) {
            VLOG_INFO("%s: requested port %"PRIu32" is in use",
                      dpif_name(&dpif->dpif), *port_nop);
        }

        vport_del_socksp(dpif, socksp);
        goto exit;
    }

    if (socksp) {
        error = vport_add_channels(dpif, *port_nop, socksp);
        if (error) {
            VLOG_INFO("%s: could not add channel for port %s",
                      dpif_name(&dpif->dpif), name);

            /* Delete the port. */
            dpif_netlink_vport_init(&request);
            request.cmd = OVS_VPORT_CMD_DEL;
            request.dp_ifindex = dpif->dp_ifindex;
            request.port_no = *port_nop;
            dpif_netlink_vport_transact(&request, NULL, NULL);
            vport_del_socksp(dpif, socksp);
            goto exit;
        }
    }
    /* Success: the channels now own the sockets, so free only the array. */
    free(socksp);

exit:
    ofpbuf_delete(buf);
    free(upcall_pids);

    return error;
}
893
/* Adds 'netdev' to 'dpif' through the compat (genetlink vport) interface:
 * maps the netdev type to a kernel vport type and, for tunnels, serializes
 * the destination port and extension flags as vport options.  Stores the
 * assigned port number in '*port_nop'.  Returns 0 on success or a positive
 * errno value (EINVAL for unsupported types). */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX : Map appropriate Windows handle */
#else
        /* Disable LRO on system devices before adding them to the
         * datapath. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            /* Each set bit in 'exts' becomes a nested flag attribute. */
            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (1 << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
960
/* Creates 'netdev' as a lightweight tunnel device via rtnetlink and then
 * adds it to the datapath as an OVS_VPORT_TYPE_NETDEV vport.  If adding
 * the vport fails, the just-created device is destroyed again.  Returns 0
 * on success or a positive errno value (EOPNOTSUPP when rtnetlink cannot
 * create this device type, which the caller treats as "fall back to the
 * compat path"). */
static int
dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
                                      struct netdev *netdev,
                                      odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *name;
    int error;

    error = dpif_netlink_rtnl_port_create(netdev);
    if (error) {
        if (error != EOPNOTSUPP) {
            VLOG_INFO_RL(&rl, "Failed to create %s with rtnetlink: %s",
                         netdev_get_name(netdev), ovs_strerror(error));
        }
        return error;
    }

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
                                    port_nop);
    if (error) {
        /* Roll back the rtnetlink device created above. */
        dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
    }
    return error;
}
989
/* dpif 'port_add' entry point: adds 'netdev' to the datapath under
 * 'upcall_lock'.  Tries the rtnetlink path first (when out-of-tree tunnels
 * are disabled) and falls back to the compat genetlink path on any
 * failure. */
static int
dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
                      odp_port_t *port_nop)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error = EOPNOTSUPP;

    fat_rwlock_wrlock(&dpif->upcall_lock);
    if (!ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop);
    }
    if (error) {
        error = dpif_netlink_port_add_compat(dpif, netdev, port_nop);
    }
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
1008
/* Removes port 'port_no' from 'dpif': looks the port up (so its name and
 * type are known), deletes the kernel vport, tears down the port's upcall
 * channels and, when in-tree tunnels are in use, destroys the matching
 * rtnetlink device.  Returns 0 on success or a positive errno value. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* EOPNOTSUPP just means the device was not rtnetlink-managed. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
1049
/* dpif 'port_del' entry point: takes 'upcall_lock' for writing around
 * dpif_netlink_port_del__(). */
static int
dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error;

    fat_rwlock_wrlock(&dpif->upcall_lock);
    error = dpif_netlink_port_del__(dpif, port_no);
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
1062
1063 static int
1064 dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
1065 const char *port_name, struct dpif_port *dpif_port)
1066 {
1067 struct dpif_netlink_vport request;
1068 struct dpif_netlink_vport reply;
1069 struct ofpbuf *buf;
1070 int error;
1071
1072 dpif_netlink_vport_init(&request);
1073 request.cmd = OVS_VPORT_CMD_GET;
1074 request.dp_ifindex = dpif->dp_ifindex;
1075 request.port_no = port_no;
1076 request.name = port_name;
1077
1078 error = dpif_netlink_vport_transact(&request, &reply, &buf);
1079 if (!error) {
1080 if (reply.dp_ifindex != request.dp_ifindex) {
1081 /* A query by name reported that 'port_name' is in some datapath
1082 * other than 'dpif', but the caller wants to know about 'dpif'. */
1083 error = ENODEV;
1084 } else if (dpif_port) {
1085 dpif_port->name = xstrdup(reply.name);
1086 dpif_port->type = xstrdup(get_vport_type(&reply));
1087 dpif_port->port_no = reply.port_no;
1088 }
1089 ofpbuf_delete(buf);
1090 }
1091 return error;
1092 }
1093
/* dpif 'port_query_by_number' entry point: looks up 'port_no' in 'dpif_'
 * via dpif_netlink_port_query__(). */
static int
dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
                                  struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
}
1102
/* dpif callback: looks up a port by device name. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    return dpif_netlink_port_query__(dpif_netlink_cast(dpif_), 0, devname,
                                     dpif_port);
}
1111
1112 static uint32_t
1113 dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
1114 odp_port_t port_no, uint32_t hash)
1115 OVS_REQ_RDLOCK(dpif->upcall_lock)
1116 {
1117 uint32_t port_idx = odp_to_u32(port_no);
1118 uint32_t pid = 0;
1119
1120 if (dpif->handlers && dpif->uc_array_size > 0) {
1121 /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1122 * channel, since it is not heavily loaded. */
1123 uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
1124 struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
1125
1126 /* Needs to check in case the socket pointer is changed in between
1127 * the holding of upcall_lock. A known case happens when the main
1128 * thread deletes the vport while the handler thread is handling
1129 * the upcall from that port. */
1130 if (h->channels[idx].sock) {
1131 pid = nl_sock_pid(h->channels[idx].sock);
1132 }
1133 }
1134
1135 return pid;
1136 }
1137
1138 static uint32_t
1139 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
1140 uint32_t hash)
1141 {
1142 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1143 uint32_t ret;
1144
1145 fat_rwlock_rdlock(&dpif->upcall_lock);
1146 ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
1147 fat_rwlock_unlock(&dpif->upcall_lock);
1148
1149 return ret;
1150 }
1151
1152 static int
1153 dpif_netlink_flow_flush(struct dpif *dpif_)
1154 {
1155 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1156 struct dpif_netlink_flow flow;
1157
1158 dpif_netlink_flow_init(&flow);
1159 flow.cmd = OVS_FLOW_CMD_DEL;
1160 flow.dp_ifindex = dpif->dp_ifindex;
1161
1162 if (netdev_is_flow_api_enabled()) {
1163 netdev_ports_flow_flush(dpif_->dpif_class);
1164 }
1165
1166 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1167 }
1168
/* State carried across dpif_netlink_port_dump_{start,next,done}(). */
struct dpif_netlink_port_state {
    struct nl_dump dump;  /* Ongoing OVS_VPORT_CMD_GET Netlink dump. */
    struct ofpbuf buf;    /* Backing storage for the last returned record. */
};
1173
1174 static void
1175 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1176 struct nl_dump *dump)
1177 {
1178 struct dpif_netlink_vport request;
1179 struct ofpbuf *buf;
1180
1181 dpif_netlink_vport_init(&request);
1182 request.cmd = OVS_VPORT_CMD_GET;
1183 request.dp_ifindex = dpif->dp_ifindex;
1184
1185 buf = ofpbuf_new(1024);
1186 dpif_netlink_vport_to_ofpbuf(&request, buf);
1187 nl_dump_start(dump, NETLINK_GENERIC, buf);
1188 ofpbuf_delete(buf);
1189 }
1190
1191 static int
1192 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1193 {
1194 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1195 struct dpif_netlink_port_state *state;
1196
1197 *statep = state = xmalloc(sizeof *state);
1198 dpif_netlink_port_dump_start__(dpif, &state->dump);
1199
1200 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
1201 return 0;
1202 }
1203
1204 static int
1205 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1206 struct nl_dump *dump,
1207 struct dpif_netlink_vport *vport,
1208 struct ofpbuf *buffer)
1209 {
1210 struct ofpbuf buf;
1211 int error;
1212
1213 if (!nl_dump_next(dump, &buf, buffer)) {
1214 return EOF;
1215 }
1216
1217 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1218 if (error) {
1219 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1220 dpif_name(&dpif->dpif), ovs_strerror(error));
1221 }
1222 return error;
1223 }
1224
1225 static int
1226 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1227 struct dpif_port *dpif_port)
1228 {
1229 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1230 struct dpif_netlink_port_state *state = state_;
1231 struct dpif_netlink_vport vport;
1232 int error;
1233
1234 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1235 &state->buf);
1236 if (error) {
1237 return error;
1238 }
1239 dpif_port->name = CONST_CAST(char *, vport.name);
1240 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1241 dpif_port->port_no = vport.port_no;
1242 return 0;
1243 }
1244
1245 static int
1246 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1247 {
1248 struct dpif_netlink_port_state *state = state_;
1249 int error = nl_dump_done(&state->dump);
1250
1251 ofpbuf_uninit(&state->buf);
1252 free(state);
1253 return error;
1254 }
1255
/* dpif callback: reports the next port-change notification for 'dpif_'.
 *
 * Returns 0 and stores the changed port's name in '*devnamep' (malloc'd;
 * caller frees) when a vport in this datapath was added, deleted, or
 * modified.  Returns ENOBUFS when the caller should assume that every
 * port may have changed (the notifier socket was just created, or it
 * overflowed).  Returns EAGAIN when no notification is pending, or
 * another positive errno value on a hard error. */
static int
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    /* Drain notifications until one concerns this datapath, the socket
     * runs dry (EAGAIN), or an error occurs. */
    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, false);
        if (!error) {
            struct dpif_netlink_vport vport;

            error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                /* Notifications for other datapaths are silently skipped. */
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        /* A deleted port's upcall channel must be torn
                         * down; flag the handler setup for a refresh. */
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            /* Socket error (typically overflow): report that everything
             * may have changed after draining the stale queue. */
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
1323
1324 static void
1325 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1326 {
1327 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1328
1329 if (dpif->port_notifier) {
1330 nl_sock_wait(dpif->port_notifier, POLLIN);
1331 } else {
1332 poll_immediate_wake();
1333 }
1334 }
1335
1336 static void
1337 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1338 const ovs_u128 *ufid, bool terse)
1339 {
1340 if (ufid) {
1341 request->ufid = *ufid;
1342 request->ufid_present = true;
1343 } else {
1344 request->ufid_present = false;
1345 }
1346 request->ufid_terse = terse;
1347 }
1348
1349 static void
1350 dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
1351 const struct nlattr *key, size_t key_len,
1352 const ovs_u128 *ufid, bool terse,
1353 struct dpif_netlink_flow *request)
1354 {
1355 dpif_netlink_flow_init(request);
1356 request->cmd = OVS_FLOW_CMD_GET;
1357 request->dp_ifindex = dpif->dp_ifindex;
1358 request->key = key;
1359 request->key_len = key_len;
1360 dpif_netlink_flow_init_ufid(request, ufid, terse);
1361 }
1362
1363 static void
1364 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1365 const struct dpif_flow_get *get,
1366 struct dpif_netlink_flow *request)
1367 {
1368 dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
1369 false, request);
1370 }
1371
1372 static int
1373 dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
1374 const struct nlattr *key, size_t key_len,
1375 const ovs_u128 *ufid, bool terse,
1376 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1377 {
1378 struct dpif_netlink_flow request;
1379
1380 dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
1381 return dpif_netlink_flow_transact(&request, reply, bufp);
1382 }
1383
1384 static int
1385 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1386 const struct dpif_netlink_flow *flow,
1387 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1388 {
1389 return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
1390 flow->ufid_present ? &flow->ufid : NULL,
1391 false, reply, bufp);
1392 }
1393
1394 static void
1395 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1396 const struct dpif_flow_put *put,
1397 struct dpif_netlink_flow *request)
1398 {
1399 static const struct nlattr dummy_action;
1400
1401 dpif_netlink_flow_init(request);
1402 request->cmd = (put->flags & DPIF_FP_CREATE
1403 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1404 request->dp_ifindex = dpif->dp_ifindex;
1405 request->key = put->key;
1406 request->key_len = put->key_len;
1407 request->mask = put->mask;
1408 request->mask_len = put->mask_len;
1409 dpif_netlink_flow_init_ufid(request, put->ufid, false);
1410
1411 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1412 request->actions = (put->actions
1413 ? put->actions
1414 : CONST_CAST(struct nlattr *, &dummy_action));
1415 request->actions_len = put->actions_len;
1416 if (put->flags & DPIF_FP_ZERO_STATS) {
1417 request->clear = true;
1418 }
1419 if (put->flags & DPIF_FP_PROBE) {
1420 request->probe = true;
1421 }
1422 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
1423 }
1424
1425 static void
1426 dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
1427 const struct nlattr *key, size_t key_len,
1428 const ovs_u128 *ufid, bool terse,
1429 struct dpif_netlink_flow *request)
1430 {
1431 dpif_netlink_flow_init(request);
1432 request->cmd = OVS_FLOW_CMD_DEL;
1433 request->dp_ifindex = dpif->dp_ifindex;
1434 request->key = key;
1435 request->key_len = key_len;
1436 dpif_netlink_flow_init_ufid(request, ufid, terse);
1437 }
1438
1439 static void
1440 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1441 const struct dpif_flow_del *del,
1442 struct dpif_netlink_flow *request)
1443 {
1444 dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
1445 del->ufid, del->terse, request);
1446 }
1447
/* Bit positions selecting which flow sources a flow dump visits. */
enum {
    DUMP_OVS_FLOWS_BIT = 0,        /* Flows installed in the kernel datapath. */
    DUMP_OFFLOADED_FLOWS_BIT = 1,  /* Flows offloaded to netdevs (hardware). */
};

/* Masks built from the bits above; stored in dpif_netlink_flow_dump's
 * 'type' field and returned by dpif_netlink_get_dump_type(). */
enum {
    DUMP_OVS_FLOWS = (1 << DUMP_OVS_FLOWS_BIT),
    DUMP_OFFLOADED_FLOWS = (1 << DUMP_OFFLOADED_FLOWS_BIT),
};
1457
/* A flow dump, shared by all threads iterating it concurrently. */
struct dpif_netlink_flow_dump {
    struct dpif_flow_dump up;         /* Common dpif flow-dump state. */
    struct nl_dump nl_dump;           /* Kernel dump (valid if DUMP_OVS_FLOWS). */
    atomic_int status;                /* First error encountered, or 0. */
    struct netdev_flow_dump **netdev_dumps;  /* Per-netdev offload dumps. */
    int netdev_dumps_num;                    /* Number of netdev_flow_dumps */
    struct ovs_mutex netdev_lock;            /* Guards the following. */
    int netdev_current_dump OVS_GUARDED;     /* Shared current dump */
    int type;                                /* Type of dump */
};
1468
/* Downcasts a generic dpif_flow_dump, which is embedded as the 'up'
 * member, back to the netlink-specific dump structure. */
static struct dpif_netlink_flow_dump *
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
}
1474
1475 static void
1476 start_netdev_dump(const struct dpif *dpif_,
1477 struct dpif_netlink_flow_dump *dump)
1478 {
1479 ovs_mutex_init(&dump->netdev_lock);
1480
1481 if (!(dump->type & DUMP_OFFLOADED_FLOWS)) {
1482 dump->netdev_dumps_num = 0;
1483 dump->netdev_dumps = NULL;
1484 return;
1485 }
1486
1487 ovs_mutex_lock(&dump->netdev_lock);
1488 dump->netdev_current_dump = 0;
1489 dump->netdev_dumps
1490 = netdev_ports_flow_dump_create(dpif_->dpif_class,
1491 &dump->netdev_dumps_num);
1492 ovs_mutex_unlock(&dump->netdev_lock);
1493 }
1494
1495 static int
1496 dpif_netlink_get_dump_type(char *str) {
1497 int type = 0;
1498
1499 if (!str || !strcmp(str, "ovs") || !strcmp(str, "dpctl")) {
1500 type |= DUMP_OVS_FLOWS;
1501 }
1502 if ((netdev_is_flow_api_enabled() && !str)
1503 || (str && (!strcmp(str, "offloaded") || !strcmp(str, "dpctl")))) {
1504 type |= DUMP_OFFLOADED_FLOWS;
1505 }
1506
1507 return type;
1508 }
1509
1510 static struct dpif_flow_dump *
1511 dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
1512 char *type)
1513 {
1514 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1515 struct dpif_netlink_flow_dump *dump;
1516 struct dpif_netlink_flow request;
1517 struct ofpbuf *buf;
1518
1519 dump = xmalloc(sizeof *dump);
1520 dpif_flow_dump_init(&dump->up, dpif_);
1521
1522 dump->type = dpif_netlink_get_dump_type(type);
1523
1524 if (dump->type & DUMP_OVS_FLOWS) {
1525 dpif_netlink_flow_init(&request);
1526 request.cmd = OVS_FLOW_CMD_GET;
1527 request.dp_ifindex = dpif->dp_ifindex;
1528 request.ufid_present = false;
1529 request.ufid_terse = terse;
1530
1531 buf = ofpbuf_new(1024);
1532 dpif_netlink_flow_to_ofpbuf(&request, buf);
1533 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
1534 ofpbuf_delete(buf);
1535 }
1536 atomic_init(&dump->status, 0);
1537 dump->up.terse = terse;
1538
1539 start_netdev_dump(dpif_, dump);
1540
1541 return &dump->up;
1542 }
1543
1544 static int
1545 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1546 {
1547 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1548 unsigned int nl_status = 0;
1549 int dump_status;
1550
1551 if (dump->type & DUMP_OVS_FLOWS) {
1552 nl_status = nl_dump_done(&dump->nl_dump);
1553 }
1554
1555 for (int i = 0; i < dump->netdev_dumps_num; i++) {
1556 int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);
1557
1558 if (err != 0 && err != EOPNOTSUPP) {
1559 VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
1560 }
1561 }
1562
1563 free(dump->netdev_dumps);
1564 ovs_mutex_destroy(&dump->netdev_lock);
1565
1566 /* No other thread has access to 'dump' at this point. */
1567 atomic_read_relaxed(&dump->status, &dump_status);
1568 free(dump);
1569 return dump_status ? dump_status : nl_status;
1570 }
1571
/* Per-thread state for iterating a dpif_netlink_flow_dump. */
struct dpif_netlink_flow_dump_thread {
    struct dpif_flow_dump_thread up;   /* Common dpif per-thread state. */
    struct dpif_netlink_flow_dump *dump;  /* The shared dump being iterated. */
    struct dpif_netlink_flow flow;     /* Scratch parsed-flow storage. */
    struct dpif_flow_stats stats;      /* Scratch statistics storage. */
    struct ofpbuf nl_flows;     /* Always used to store flows. */
    struct ofpbuf *nl_actions;  /* Used if kernel does not supply actions. */
    int netdev_dump_idx;        /* This thread current netdev dump index */
    bool netdev_done;           /* If we are finished dumping netdevs */

    /* (Key/Mask/Actions) Buffers for netdev dumping */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
};
1587
/* Downcasts a generic dpif_flow_dump_thread, embedded as 'up', back to
 * the netlink-specific per-thread dump structure. */
static struct dpif_netlink_flow_dump_thread *
dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
}
1593
1594 static struct dpif_flow_dump_thread *
1595 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1596 {
1597 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1598 struct dpif_netlink_flow_dump_thread *thread;
1599
1600 thread = xmalloc(sizeof *thread);
1601 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1602 thread->dump = dump;
1603 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1604 thread->nl_actions = NULL;
1605 thread->netdev_dump_idx = 0;
1606 thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num);
1607
1608 return &thread->up;
1609 }
1610
1611 static void
1612 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1613 {
1614 struct dpif_netlink_flow_dump_thread *thread
1615 = dpif_netlink_flow_dump_thread_cast(thread_);
1616
1617 ofpbuf_uninit(&thread->nl_flows);
1618 ofpbuf_delete(thread->nl_actions);
1619 free(thread);
1620 }
1621
1622 static void
1623 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1624 const struct dpif_netlink_flow *datapath_flow)
1625 {
1626 dpif_flow->key = datapath_flow->key;
1627 dpif_flow->key_len = datapath_flow->key_len;
1628 dpif_flow->mask = datapath_flow->mask;
1629 dpif_flow->mask_len = datapath_flow->mask_len;
1630 dpif_flow->actions = datapath_flow->actions;
1631 dpif_flow->actions_len = datapath_flow->actions_len;
1632 dpif_flow->ufid_present = datapath_flow->ufid_present;
1633 dpif_flow->pmd_id = PMD_ID_NULL;
1634 if (datapath_flow->ufid_present) {
1635 dpif_flow->ufid = datapath_flow->ufid;
1636 } else {
1637 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1638 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1639 &dpif_flow->ufid);
1640 }
1641 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
1642 dpif_flow->offloaded = false;
1643 }
1644
/* Advances 'thread' to the next per-netdev dump.
 *
 * The design is such that all threads are working together on the first dump
 * to the last, in order (at first they all on dump 0).
 * When the first thread finds that the given dump is finished,
 * they all move to the next. If two or more threads find the same dump
 * is finished at the same time, the first one will advance the shared
 * netdev_current_dump and the others will catch up. */
static void
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
{
    struct dpif_netlink_flow_dump *dump = thread->dump;

    ovs_mutex_lock(&dump->netdev_lock);
    /* if we haven't finished (dumped everything) */
    if (dump->netdev_current_dump < dump->netdev_dumps_num) {
        /* if we are the first to find that current dump is finished
         * advance it. */
        if (thread->netdev_dump_idx == dump->netdev_current_dump) {
            thread->netdev_dump_idx = ++dump->netdev_current_dump;
            /* did we just finish the last dump? done. */
            if (dump->netdev_current_dump == dump->netdev_dumps_num) {
                thread->netdev_done = true;
            }
        } else {
            /* otherwise, we are behind, catch up */
            thread->netdev_dump_idx = dump->netdev_current_dump;
        }
    } else {
        /* some other thread finished */
        thread->netdev_done = true;
    }
    ovs_mutex_unlock(&dump->netdev_lock);
}
1677
/* Converts a netdev-offloaded flow ('match', 'actions', 'stats', 'ufid')
 * into '*flow'.  Serializes the key and mask into 'key_buf' and
 * 'mask_buf' respectively; 'flow' points into those buffers, so they must
 * outlive it.  Always returns 0. */
static int
dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
                                       struct ofpbuf *key_buf,
                                       struct ofpbuf *mask_buf,
                                       struct nlattr *actions,
                                       struct dpif_flow_stats *stats,
                                       ovs_u128 *ufid,
                                       struct dpif_flow *flow,
                                       bool terse OVS_UNUSED)
{

    struct odp_flow_key_parms odp_parms = {
        .flow = &match->flow,
        .mask = &match->wc.masks,
        .support = {
            .max_vlan_headers = 1,
        },
    };
    size_t offset;

    memset(flow, 0, sizeof *flow);

    /* Key.  Capture the tail pointer *before* appending, so 'flow->key'
     * points at the data about to be written. */
    offset = key_buf->size;
    flow->key = ofpbuf_tail(key_buf);
    odp_flow_key_from_flow(&odp_parms, key_buf);
    flow->key_len = key_buf->size - offset;

    /* Mask, serialized the same way relative to the just-written key. */
    offset = mask_buf->size;
    flow->mask = ofpbuf_tail(mask_buf);
    odp_parms.key_buf = key_buf;
    odp_flow_key_from_mask(&odp_parms, mask_buf);
    flow->mask_len = mask_buf->size - offset;

    /* Actions: alias the payload of the supplied nested attribute. */
    flow->actions = nl_attr_get(actions);
    flow->actions_len = nl_attr_get_size(actions);

    /* Stats */
    memcpy(&flow->stats, stats, sizeof *stats);

    /* UFID */
    flow->ufid_present = true;
    flow->ufid = *ufid;

    flow->pmd_id = PMD_ID_NULL;

    /* This flow came from hardware offload, not the kernel datapath. */
    flow->offloaded = true;

    return 0;
}
1730
/* dpif callback: retrieves up to 'max_flows' flows into 'flows[]'.
 *
 * First drains this thread's share of the netdev (hardware-offloaded)
 * dumps, then continues with the kernel Netlink dump if requested.
 * Returned flows point into per-thread buffers that remain valid only
 * until the next call on the same thread.  Returns the number of flows
 * stored (0 signals end of dump). */
static int
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
{
    struct dpif_netlink_flow_dump_thread *thread
        = dpif_netlink_flow_dump_thread_cast(thread_);
    struct dpif_netlink_flow_dump *dump = thread->dump;
    struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
    int n_flows;

    /* Release the actions buffer kept alive for the previous batch. */
    ofpbuf_delete(thread->nl_actions);
    thread->nl_actions = NULL;

    n_flows = 0;
    max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

    /* Phase 1: netdev (offloaded) flows, one per-batch-slot buffer each. */
    while (!thread->netdev_done && n_flows < max_flows) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
        struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
        struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
        struct ofpbuf key, mask, act;
        struct dpif_flow *f = &flows[n_flows];
        int cur = thread->netdev_dump_idx;
        struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
        struct match match;
        struct nlattr *actions;
        struct dpif_flow_stats stats;
        ovs_u128 ufid;
        bool has_next;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        has_next = netdev_flow_dump_next(netdev_dump, &match,
                                         &actions, &stats,
                                         &ufid,
                                         &thread->nl_flows,
                                         &act);
        if (has_next) {
            dpif_netlink_netdev_match_to_dpif_flow(&match,
                                                   &key, &mask,
                                                   actions,
                                                   &stats,
                                                   &ufid,
                                                   f,
                                                   dump->up.terse);
            n_flows++;
        } else {
            /* Current netdev dump exhausted; move to the next (shared
             * across all dumping threads). */
            dpif_netlink_advance_netdev_dump(thread);
        }
    }

    if (!(dump->type & DUMP_OVS_FLOWS)) {
        return n_flows;
    }

    /* Phase 2: kernel datapath flows.  Keep going while the batch is
     * empty, or while there is buffered Netlink data and room left. */
    while (!n_flows
           || (n_flows < max_flows && thread->nl_flows.size)) {
        struct dpif_netlink_flow datapath_flow;
        struct ofpbuf nl_flow;
        int error;

        /* Try to grab another flow. */
        if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
            break;
        }

        /* Convert the flow to our output format. */
        error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
        if (error) {
            atomic_store_relaxed(&dump->status, error);
            break;
        }

        if (dump->up.terse || datapath_flow.actions) {
            /* Common case: we don't want actions, or the flow includes
             * actions. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
        } else {
            /* Rare case: the flow does not include actions. Retrieve this
             * individual flow again to get the actions. */
            error = dpif_netlink_flow_get(dpif, &datapath_flow,
                                          &datapath_flow, &thread->nl_actions);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
                continue;
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
                atomic_store_relaxed(&dump->status, error);
                break;
            }

            /* Save this flow. Then exit, because we only have one buffer to
             * handle this case. */
            dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
                                           &datapath_flow);
            break;
        }
    }
    return n_flows;
}
1834
/* Encodes 'd_exec' as an OVS_PACKET_CMD_EXECUTE Netlink request in 'buf':
 * generic netlink header, ovs_header, then the packet data, metadata key,
 * actions, and optional probe/MRU attributes. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve room up front: headers (~64 bytes) + packet + metadata key
     * + actions. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Packet metadata goes in a nested OVS_PACKET_ATTR_KEY attribute. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }
}
1870
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive). */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    /* Per-operation request/reply buffers plus the Netlink transaction
     * that ties them together. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Pass 1: encode each operation into its own request buffer. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO makes the kernel echo the flow back so the
                 * pre-operation stats can be extracted below. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->u.execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->u.execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->u.execute,
                                            &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->u.flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            /* The caller-provided buffer receives the reply so the
             * returned flow can point into caller-owned storage. */
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Send all requests in a single batched transaction. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: decode each reply back into its operation. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            /* No reply payload to decode. */
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->u.flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
                                                   &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
2033
/* Tries to satisfy a flow-get from the netdev (hardware offload) layer.
 * On success fills in 'get->flow' (key/mask point into stack-backed
 * buffers consumed by the conversion; actions are copied into
 * 'get->buffer').  Returns 0 on success or a positive errno value if the
 * flow is not known to the offload layer. */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif->dpif.dpif_class, &match,
                                &actions, get->ufid, &stats, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* Re-point 'actions' at caller-owned storage: append a copy to
     * 'get->buffer', then reference it from offset 0.
     * NOTE(review): ofpbuf_at(get->buffer, 0, 0) is only the copied
     * actions if the buffer was empty beforehand — presumably guaranteed
     * by the caller; verify. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
2072
/* Tries to install a flow-put through the netdev (hardware offload)
 * layer instead of the kernel datapath.
 *
 * Returns 0 if the flow was offloaded (a previous kernel copy is deleted
 * for DPIF_FP_MODIFY), EOPNOTSUPP if offload cannot handle it, or
 * another positive errno value.  May rewrite 'put->flags' (MODIFY ->
 * CREATE) when a modified flow had to be removed from hardware so the
 * caller can install it in the kernel instead. */
static int
parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct dpif_class *dpif_class = dpif->dpif.dpif_class;
    struct match match;
    odp_port_t in_port;
    const struct nlattr *nla;
    size_t left;
    int outputs = 0;
    struct netdev *dev;
    struct offload_info info;
    ovs_be16 dst_port = 0;
    int err;

    if (put->flags & DPIF_FP_PROBE) {
        return EOPNOTSUPP;
    }

    err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
                                      put->mask_len, &match);
    if (err) {
        return err;
    }

    /* When we try to install a dummy flow from a probed feature. */
    if (match.flow.dl_type == htons(0x1234)) {
        return EOPNOTSUPP;
    }

    in_port = match.flow.in_port.odp_port;
    dev = netdev_ports_get(in_port, dpif_class);
    if (!dev) {
        return EOPNOTSUPP;
    }

    /* Get tunnel dst port and count outputs */
    NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            const struct netdev_tunnel_config *tnl_cfg;
            struct netdev *outdev;
            odp_port_t out_port;

            outputs++;
            if (outputs > 1) {
                VLOG_DBG_RL(&rl, "offloading multiple ports isn't supported");
                err = EOPNOTSUPP;
                goto out;
            }

            out_port = nl_attr_get_odp_port(nla);
            outdev = netdev_ports_get(out_port, dpif_class);
            if (!outdev) {
                err = EOPNOTSUPP;
                goto out;
            }
            tnl_cfg = netdev_get_tunnel_config(outdev);
            if (tnl_cfg && tnl_cfg->dst_port != 0) {
                dst_port = tnl_cfg->dst_port;
            }
            netdev_close(outdev);
        }
    }

    info.dpif_class = dpif_class;
    info.tp_dst_port = dst_port;
    err = netdev_flow_put(dev, &match,
                          CONST_CAST(struct nlattr *, put->actions),
                          put->actions_len,
                          CONST_CAST(ovs_u128 *, put->ufid),
                          &info, put->stats);

    if (!err) {
        if (put->flags & DPIF_FP_MODIFY) {
            /* The flow now lives in hardware; remove the kernel copy so
             * it is not matched twice. */
            struct dpif_op *opp;
            struct dpif_op op;

            op.type = DPIF_OP_FLOW_DEL;
            op.u.flow_del.key = put->key;
            op.u.flow_del.key_len = put->key_len;
            op.u.flow_del.ufid = put->ufid;
            op.u.flow_del.pmd_id = put->pmd_id;
            op.u.flow_del.stats = NULL;
            op.u.flow_del.terse = false;

            opp = &op;
            dpif_netlink_operate__(dpif, &opp, 1);
        }

        VLOG_DBG("added flow");
    } else if (err != EEXIST) {
        VLOG_ERR_RL(&rl, "failed to offload flow: %s", ovs_strerror(err));
    }

out:
    if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
        /* Modified rule can't be offloaded, try and delete from HW */
        int del_err = netdev_flow_del(dev, put->ufid, put->stats);

        if (!del_err) {
            /* Delete from hw success, so old flow was offloaded.
             * Change flags to create the flow in kernel */
            put->flags &= ~DPIF_FP_MODIFY;
            put->flags |= DPIF_FP_CREATE;
        } else if (del_err != ENOENT) {
            VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
                        ovs_strerror(del_err));
            /* stop processing the flow in kernel */
            err = 0;
        }
    }

    netdev_close(dev);

    return err;
}
2189
2190 static int
2191 try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op)
2192 {
2193 int err = EOPNOTSUPP;
2194
2195 switch (op->type) {
2196 case DPIF_OP_FLOW_PUT: {
2197 struct dpif_flow_put *put = &op->u.flow_put;
2198
2199 if (!put->ufid) {
2200 break;
2201 }
2202
2203 log_flow_put_message(&dpif->dpif, &this_module, put, 0);
2204 err = parse_flow_put(dpif, put);
2205 break;
2206 }
2207 case DPIF_OP_FLOW_DEL: {
2208 struct dpif_flow_del *del = &op->u.flow_del;
2209
2210 if (!del->ufid) {
2211 break;
2212 }
2213
2214 log_flow_del_message(&dpif->dpif, &this_module, del, 0);
2215 err = netdev_ports_flow_del(dpif->dpif.dpif_class, del->ufid,
2216 del->stats);
2217 break;
2218 }
2219 case DPIF_OP_FLOW_GET: {
2220 struct dpif_flow_get *get = &op->u.flow_get;
2221
2222 if (!op->u.flow_get.ufid) {
2223 break;
2224 }
2225
2226 log_flow_get_message(&dpif->dpif, &this_module, get, 0);
2227 err = parse_flow_get(dpif, get);
2228 break;
2229 }
2230 case DPIF_OP_EXECUTE:
2231 default:
2232 break;
2233 }
2234
2235 return err;
2236 }
2237
/* Passes all 'n_ops' operations in 'ops' to the kernel datapath, invoking
 * dpif_netlink_operate__() repeatedly until every operation has been
 * consumed (each call reports how many operations it handled). */
static void
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
                            size_t n_ops)
{
    size_t done = 0;

    while (done < n_ops) {
        done += dpif_netlink_operate__(dpif, ops + done, n_ops - done);
    }
}
2249
2250 static void
2251 dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
2252 {
2253 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2254 struct dpif_op *new_ops[OPERATE_MAX_OPS];
2255 int count = 0;
2256 int i = 0;
2257 int err = 0;
2258
2259 if (netdev_is_flow_api_enabled()) {
2260 while (n_ops > 0) {
2261 count = 0;
2262
2263 while (n_ops > 0 && count < OPERATE_MAX_OPS) {
2264 struct dpif_op *op = ops[i++];
2265
2266 err = try_send_to_netdev(dpif, op);
2267 if (err && err != EEXIST) {
2268 new_ops[count++] = op;
2269 } else {
2270 op->error = err;
2271 }
2272
2273 n_ops--;
2274 }
2275
2276 dpif_netlink_operate_chunks(dpif, new_ops, count);
2277 }
2278 } else {
2279 dpif_netlink_operate_chunks(dpif, ops, n_ops);
2280 }
2281 }
2282
2283 #if _WIN32
2284 static void
2285 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2286 {
2287 vport_delete_sock_pool(handler);
2288 }
2289
2290 static int
2291 dpif_netlink_handler_init(struct dpif_handler *handler)
2292 {
2293 return vport_create_sock_pool(handler);
2294 }
2295 #else
2296
2297 static int
2298 dpif_netlink_handler_init(struct dpif_handler *handler)
2299 {
2300 handler->epoll_fd = epoll_create(10);
2301 return handler->epoll_fd < 0 ? errno : 0;
2302 }
2303
2304 static void
2305 dpif_netlink_handler_uninit(struct dpif_handler *handler)
2306 {
2307 close(handler->epoll_fd);
2308 }
2309 #endif
2310
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * Also (re)sizes the handler array to 'n_handlers' if it differs from the
 * current size, and (re)programs each kernel vport's upcall PIDs so misses
 * are delivered to the right handler sockets.  Returns 0 on success,
 * otherwise a positive errno value (the first error encountered; later
 * ports are still processed).  Caller must hold 'dpif->upcall_lock' for
 * writing. */
static int
dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;
    struct dpif_netlink_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* Windows supports at most one handler (see dpif_netlink_handlers_set). */
    ovs_assert(!WINDOWS || n_handlers <= 1);
    ovs_assert(!WINDOWS || dpif->n_handlers <= 1);

    if (dpif->n_handlers != n_handlers) {
        /* Handler count changed: discard all channels and rebuild the
         * handler array from scratch. */
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            int error;
            struct dpif_handler *handler = &dpif->handlers[i];

            error = dpif_netlink_handler_init(handler);
            if (error) {
                size_t j;

                /* Unwind the handlers initialized so far. */
                for (j = 0; j < i; j++) {
                    struct dpif_handler *tmp = &dpif->handlers[j];
                    dpif_netlink_handler_uninit(tmp);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return error;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    /* Bitmap of existing channel slots that are backed by a kernel vport;
     * anything left unset afterward is stale and gets deleted. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_netlink_port_dump_start__(dpif, &dump);
    while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t *upcall_pids = NULL;
        int error;

        if (port_no >= dpif->uc_array_size
            || !vport_get_pids(dpif, port_no, &upcall_pids)) {
            /* No channels for this vport yet: create one socket per
             * handler and register them. */
            struct nl_sock **socksp = vport_create_socksp(dpif, &error);

            if (!socksp) {
                goto error;
            }

            error = vport_add_channels(dpif, vport.port_no, socksp);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                vport_del_socksp(dpif, socksp);
                retval = error;
                goto error;
            }
            upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
            free(socksp);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != dpif->n_handlers
            || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
                      *upcall_pids)) {
            struct dpif_netlink_vport vport_request;

            dpif_netlink_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = dpif->n_handlers;
            vport_request.upcall_pids = upcall_pids;
            error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is. Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        free(upcall_pids);
        continue;

    error:
        /* Per-port failure: drop this port's channels and continue the
         * dump with the next port. */
        free(upcall_pids);
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
2444
2445 static int
2446 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
2447 OVS_REQ_WRLOCK(dpif->upcall_lock)
2448 {
2449 if ((dpif->handlers != NULL) == enable) {
2450 return 0;
2451 } else if (!enable) {
2452 destroy_all_channels(dpif);
2453 return 0;
2454 } else {
2455 return dpif_netlink_refresh_channels(dpif, 1);
2456 }
2457 }
2458
2459 static int
2460 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
2461 {
2462 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2463 int error;
2464
2465 fat_rwlock_wrlock(&dpif->upcall_lock);
2466 error = dpif_netlink_recv_set__(dpif, enable);
2467 fat_rwlock_unlock(&dpif->upcall_lock);
2468
2469 return error;
2470 }
2471
2472 static int
2473 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
2474 {
2475 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2476 int error = 0;
2477
2478 #ifdef _WIN32
2479 /* Multiple upcall handlers will be supported once kernel datapath supports
2480 * it. */
2481 if (n_handlers > 1) {
2482 return error;
2483 }
2484 #endif
2485
2486 fat_rwlock_wrlock(&dpif->upcall_lock);
2487 if (dpif->handlers) {
2488 error = dpif_netlink_refresh_channels(dpif, n_handlers);
2489 }
2490 fat_rwlock_unlock(&dpif->upcall_lock);
2491
2492 return error;
2493 }
2494
2495 static int
2496 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
2497 uint32_t queue_id, uint32_t *priority)
2498 {
2499 if (queue_id < 0xf000) {
2500 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
2501 return 0;
2502 } else {
2503 return EINVAL;
2504 }
2505 }
2506
/* Parses the OVS_PACKET Generic Netlink message in 'buf' into '*upcall' and
 * stores the originating datapath's ifindex in '*dp_ifindex'.  Returns 0 on
 * success, EINVAL if the message is malformed or is neither an
 * OVS_PACKET_CMD_MISS nor an OVS_PACKET_CMD_ACTION.
 *
 * On success '*upcall' contains pointers into 'buf', so 'buf' must outlive
 * '*upcall'. */
static int
parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
                 struct dpif_upcall *upcall, int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
        [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true }
    };

    /* Peel off the Netlink, Generic Netlink, and OVS headers before
     * parsing the attributes. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
                : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
                : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
    upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
    upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
    upcall->mru = a[OVS_PACKET_ATTR_MRU];

    /* Allow overwriting the netlink attribute header without reallocating. */
    dp_packet_use_stub(&upcall->packet,
                       CONST_CAST(struct nlattr *,
                                  nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                       nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                       sizeof(struct nlattr));
    dp_packet_set_data(&upcall->packet,
                       (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
    dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
        /* Ethernet frame */
        upcall->packet.packet_type = htonl(PT_ETH);
    } else {
        /* Non-Ethernet packet. Get the Ethertype from the NL attributes */
        ovs_be16 ethertype = 0;
        const struct nlattr *et_nla = nl_attr_find__(upcall->key,
                                                     upcall->key_len,
                                                     OVS_KEY_ATTR_ETHERTYPE);
        if (et_nla) {
            ethertype = nl_attr_get_be16(et_nla);
        }
        upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                                    ntohs(ethertype));
        dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
    }

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
2586
#ifdef _WIN32
#define PACKET_RECV_BATCH_SIZE 50
/* Windows variant of upcall reception: polls every socket in the single
 * handler's vport socket pool for an upcall destined to this datapath.
 * Returns 0 and fills '*upcall'/'buf' on success, EAGAIN if nothing is
 * pending (or after PACKET_RECV_BATCH_SIZE reads), or another positive
 * errno value on error.  Caller holds 'dpif->upcall_lock' for reading. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the work done per call so one busy socket cannot
             * starve the caller. */
            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            /* Only deliver upcalls that belong to this datapath. */
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
2653 #else
/* Receives one upcall for handler 'handler_id' into '*upcall'/'buf'.
 * Events are fetched from the handler's epoll set in batches and drained
 * one channel at a time.  Returns 0 on success, EAGAIN if no upcall is
 * ready (or after 50 reads), or another positive errno value on error.
 * Caller holds 'dpif->upcall_lock' for reading. */
static int
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
                    struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        /* Previous batch consumed; poll for a fresh set of ready
         * channels (non-blocking: timeout 0). */
        handler->event_offset = handler->n_events = 0;

        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* Each epoll event carries the channel index in data.u32 (set when
         * the channel was registered). */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Bound the work done per call so one busy channel cannot
             * starve the caller. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed. Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    break;
                }
                return error;
            }

            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
            /* Only deliver upcalls that belong to this datapath. */
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
        }
    }

    return EAGAIN;
}
2728 #endif
2729
/* Implements the dpif 'recv' callback: receives one upcall for handler
 * 'handler_id', dispatching to the platform-specific implementation under
 * the read-side of 'dpif->upcall_lock'. */
static int
dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
                  struct dpif_upcall *upcall, struct ofpbuf *buf)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    int error;

    fat_rwlock_rdlock(&dpif->upcall_lock);
#ifdef _WIN32
    error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
#else
    error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
#endif
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
2747
2748 static void
2749 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2750 OVS_REQ_RDLOCK(dpif->upcall_lock)
2751 {
2752 #ifdef _WIN32
2753 uint32_t i;
2754 struct dpif_windows_vport_sock *sock_pool =
2755 dpif->handlers[handler_id].vport_sock_pool;
2756
2757 /* Only one handler is supported currently. */
2758 if (handler_id >= 1) {
2759 return;
2760 }
2761
2762 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2763 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2764 }
2765 #else
2766 if (dpif->handlers && handler_id < dpif->n_handlers) {
2767 struct dpif_handler *handler = &dpif->handlers[handler_id];
2768
2769 poll_fd_wait(handler->epoll_fd, POLLIN);
2770 }
2771 #endif
2772 }
2773
2774 static void
2775 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2776 {
2777 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2778
2779 fat_rwlock_rdlock(&dpif->upcall_lock);
2780 dpif_netlink_recv_wait__(dpif, handler_id);
2781 fat_rwlock_unlock(&dpif->upcall_lock);
2782 }
2783
2784 static void
2785 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2786 OVS_REQ_WRLOCK(dpif->upcall_lock)
2787 {
2788 if (dpif->handlers) {
2789 size_t i, j;
2790
2791 for (i = 0; i < dpif->uc_array_size; i++ ) {
2792 if (!dpif->handlers[0].channels[i].sock) {
2793 continue;
2794 }
2795
2796 for (j = 0; j < dpif->n_handlers; j++) {
2797 nl_sock_drain(dpif->handlers[j].channels[i].sock);
2798 }
2799 }
2800 }
2801 }
2802
2803 static void
2804 dpif_netlink_recv_purge(struct dpif *dpif_)
2805 {
2806 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2807
2808 fat_rwlock_wrlock(&dpif->upcall_lock);
2809 dpif_netlink_recv_purge__(dpif);
2810 fat_rwlock_unlock(&dpif->upcall_lock);
2811 }
2812
/* Returns a heap-allocated string holding the version reported by the
 * openvswitch kernel module (read from sysfs on Linux), or NULL if it
 * cannot be determined.  The caller owns the returned string. */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
    FILE *f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (f) {
        char version[MAX_VERSION_STR_SIZE];

        if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
            /* Strip the trailing newline, if any, before copying. */
            char *newline = strchr(version, '\n');

            if (newline) {
                *newline = '\0';
            }
            version_str = xstrdup(version);
        }
        fclose(f);
    }
#endif

    return version_str;
}
2842
/* State carried across a conntrack dump: the generic dpif-facing part plus
 * the Netlink-specific dump handle that backs it. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;        /* Generic part; handed to callers. */
    struct nl_ct_dump_state *nl_ct_dump; /* Netlink conntrack dump state. */
};
2847
2848 static int
2849 dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
2850 struct ct_dpif_dump_state **dump_,
2851 const uint16_t *zone)
2852 {
2853 struct dpif_netlink_ct_dump_state *dump;
2854 int err;
2855
2856 dump = xzalloc(sizeof *dump);
2857 err = nl_ct_dump_start(&dump->nl_ct_dump, zone);
2858 if (err) {
2859 free(dump);
2860 return err;
2861 }
2862
2863 *dump_ = &dump->up;
2864
2865 return 0;
2866 }
2867
2868 static int
2869 dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
2870 struct ct_dpif_dump_state *dump_,
2871 struct ct_dpif_entry *entry)
2872 {
2873 struct dpif_netlink_ct_dump_state *dump;
2874
2875 INIT_CONTAINER(dump, dump_, up);
2876
2877 return nl_ct_dump_next(dump->nl_ct_dump, entry);
2878 }
2879
2880 static int
2881 dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
2882 struct ct_dpif_dump_state *dump_)
2883 {
2884 struct dpif_netlink_ct_dump_state *dump;
2885 int err;
2886
2887 INIT_CONTAINER(dump, dump_, up);
2888
2889 err = nl_ct_dump_done(dump->nl_ct_dump);
2890 free(dump);
2891 return err;
2892 }
2893
2894 static int
2895 dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone)
2896 {
2897 if (zone) {
2898 return nl_ct_flush_zone(*zone);
2899 } else {
2900 return nl_ct_flush();
2901 }
2902 }
2903
2904 \f
2905 /* Meters */
2906 static void
2907 dpif_netlink_meter_get_features(const struct dpif * dpif OVS_UNUSED,
2908 struct ofputil_meter_features *features)
2909 {
2910 features->max_meters = 0;
2911 features->band_types = 0;
2912 features->capabilities = 0;
2913 features->max_bands = 0;
2914 features->max_color = 0;
2915 }
2916
2917 static int
2918 dpif_netlink_meter_set(struct dpif *dpif OVS_UNUSED,
2919 ofproto_meter_id *meter_id OVS_UNUSED,
2920 struct ofputil_meter_config *config OVS_UNUSED)
2921 {
2922 return EFBIG; /* meter_id out of range */
2923 }
2924
2925 static int
2926 dpif_netlink_meter_get(const struct dpif *dpif OVS_UNUSED,
2927 ofproto_meter_id meter_id OVS_UNUSED,
2928 struct ofputil_meter_stats *stats OVS_UNUSED,
2929 uint16_t n_bands OVS_UNUSED)
2930 {
2931 return EFBIG; /* meter_id out of range */
2932 }
2933
2934 static int
2935 dpif_netlink_meter_del(struct dpif *dpif OVS_UNUSED,
2936 ofproto_meter_id meter_id OVS_UNUSED,
2937 struct ofputil_meter_stats *stats OVS_UNUSED,
2938 uint16_t n_bands OVS_UNUSED)
2939 {
2940 return EFBIG; /* meter_id out of range */
2941 }
2942
2943 \f
/* The dpif provider for the Linux kernel ("system") datapath.  Entries are
 * positional and must match the member order of 'struct dpif_class' in
 * dpif-provider.h; a NULL entry means the operation is not supported or
 * not needed by this provider. */
const struct dpif_class dpif_netlink_class = {
    "system",
    NULL,                       /* init */
    dpif_netlink_enumerate,
    NULL,
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                       /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                       /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    NULL,                       /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                       /* register_dp_purge_cb */
    NULL,                       /* register_upcall_cb */
    NULL,                       /* enable_upcall */
    NULL,                       /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    dpif_netlink_ct_flush,
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
};
2994
/* One-time initialization: looks up the Generic Netlink families and the
 * vport multicast group registered by the OVS kernel module, and probes
 * whether out-of-tree tunnels are in use.  Thread-safe; the result of the
 * first call is cached and returned by every later call.  Returns 0 on
 * success, otherwise a positive errno value. */
static int
dpif_netlink_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static int error;           /* Cached result of the first run. */

    if (ovsthread_once_start(&once)) {
        error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
                                      &ovs_datapath_family);
        if (error) {
            VLOG_WARN("Generic Netlink family '%s' does not exist. "
                      "The Open vSwitch kernel module is probably not loaded.",
                      OVS_DATAPATH_FAMILY);
        }
        /* Later lookups are skipped as soon as one fails. */
        if (!error) {
            error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
        }
        if (!error) {
            error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
        }
        if (!error) {
            error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
                                          &ovs_packet_family);
        }
        if (!error) {
            error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
                                           &ovs_vport_mcgroup);
        }

        ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();

        ovsthread_once_done(&once);
    }

    return error;
}
3031
3032 bool
3033 dpif_netlink_is_internal_device(const char *name)
3034 {
3035 struct dpif_netlink_vport reply;
3036 struct ofpbuf *buf;
3037 int error;
3038
3039 error = dpif_netlink_vport_get(name, &reply, &buf);
3040 if (!error) {
3041 ofpbuf_delete(buf);
3042 } else if (error != ENODEV && error != ENOENT) {
3043 VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
3044 name, ovs_strerror(error));
3045 }
3046
3047 return reply.type == OVS_VPORT_TYPE_INTERNAL;
3048 }
3049
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'vport' will contain pointers into 'buf', so the caller should not free
 * 'buf' while 'vport' is still in use. */
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Peel off the Netlink, Generic Netlink, and OVS headers before
     * parsing the attributes. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* The attribute payload is an array of u32 PIDs. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);

    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    return 0;
}
3105
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'vport'.  Only fields of
 * 'vport' that are set to non-"empty" values are serialized. */
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
3148
3149 /* Clears 'vport' to "empty" values. */
3150 void
3151 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
3152 {
3153 memset(vport, 0, sizeof *vport);
3154 vport->port_no = ODPP_NONE;
3155 }
3156
/* Executes 'request' in the kernel datapath. If the command fails, returns a
 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be an ovs_vport also, which is decoded
 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
int
dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
                            struct dpif_netlink_vport *reply,
                            struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    error = dpif_netlink_init();
    if (error) {
        /* Leave outputs in a well-defined "empty" state on failure. */
        if (reply) {
            *bufp = NULL;
            dpif_netlink_vport_init(reply);
        }
        return error;
    }

    request_buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        if (!error) {
            error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            /* Decode failed (or transact failed): reset the reply and
             * release the buffer so the caller never sees stale data. */
            dpif_netlink_vport_init(reply);
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
3199
3200 /* Obtains information about the kernel vport named 'name' and stores it into
3201 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
3202 * longer needed ('reply' will contain pointers into '*bufp'). */
3203 int
3204 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
3205 struct ofpbuf **bufp)
3206 {
3207 struct dpif_netlink_vport request;
3208
3209 dpif_netlink_vport_init(&request);
3210 request.cmd = OVS_VPORT_CMD_GET;
3211 request.name = name;
3212
3213 return dpif_netlink_vport_transact(&request, reply, bufp);
3214 }
3215
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'dp' is still in use. */
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
{
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                                NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                                .optional = true },
    };

    dpif_netlink_dp_init(dp);

    /* Peel off the Netlink, Generic Netlink, and OVS headers before
     * parsing the attributes. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    return 0;
}
3262
3263 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
3264 static void
3265 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
3266 {
3267 struct ovs_header *ovs_header;
3268
3269 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
3270 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
3271 OVS_DATAPATH_VERSION);
3272
3273 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
3274 ovs_header->dp_ifindex = dp->dp_ifindex;
3275
3276 if (dp->name) {
3277 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
3278 }
3279
3280 if (dp->upcall_pid) {
3281 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
3282 }
3283
3284 if (dp->user_features) {
3285 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
3286 }
3287
3288 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
3289 }
3290
3291 /* Clears 'dp' to "empty" values. */
3292 static void
3293 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
3294 {
3295 memset(dp, 0, sizeof *dp);
3296 }
3297
3298 static void
3299 dpif_netlink_dp_dump_start(struct nl_dump *dump)
3300 {
3301 struct dpif_netlink_dp request;
3302 struct ofpbuf *buf;
3303
3304 dpif_netlink_dp_init(&request);
3305 request.cmd = OVS_DP_CMD_GET;
3306
3307 buf = ofpbuf_new(1024);
3308 dpif_netlink_dp_to_ofpbuf(&request, buf);
3309 nl_dump_start(dump, NETLINK_GENERIC, buf);
3310 ofpbuf_delete(buf);
3311 }
3312
3313 /* Executes 'request' in the kernel datapath. If the command fails, returns a
3314 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
3315 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
3316 * result of the command is expected to be of the same form, which is decoded
3317 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
3318 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
3319 static int
3320 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
3321 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
3322 {
3323 struct ofpbuf *request_buf;
3324 int error;
3325
3326 ovs_assert((reply != NULL) == (bufp != NULL));
3327
3328 request_buf = ofpbuf_new(1024);
3329 dpif_netlink_dp_to_ofpbuf(request, request_buf);
3330 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
3331 ofpbuf_delete(request_buf);
3332
3333 if (reply) {
3334 dpif_netlink_dp_init(reply);
3335 if (!error) {
3336 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
3337 }
3338 if (error) {
3339 ofpbuf_delete(*bufp);
3340 *bufp = NULL;
3341 }
3342 }
3343 return error;
3344 }
3345
3346 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
3347 * The caller must free '*bufp' when the reply is no longer needed ('reply'
3348 * will contain pointers into '*bufp'). */
3349 static int
3350 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
3351 struct ofpbuf **bufp)
3352 {
3353 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
3354 struct dpif_netlink_dp request;
3355
3356 dpif_netlink_dp_init(&request);
3357 request.cmd = OVS_DP_CMD_GET;
3358 request.dp_ifindex = dpif->dp_ifindex;
3359
3360 return dpif_netlink_dp_transact(&request, reply, bufp);
3361 }
3362
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'flow'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'flow' is still in use. */
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    /* Attributes the kernel may attach to an OVS_FLOW_* message.  All are
     * optional at the policy level; the requirement that at least a key or a
     * UFID be present is enforced separately below. */
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    /* Start from an all-zero 'flow' so absent attributes read as null/0. */
    dpif_netlink_flow_init(flow);

    /* Peel off the Netlink, Generic Netlink, and OVS headers before parsing
     * the attribute list. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A flow must be identifiable by at least one of key or UFID. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
3435
3436
/*
 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
 * OVS_KEY_ATTR_ETHERTYPE. Puts 'data' to 'buf'.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* Split 'data' into the bytes before and after the PACKET_TYPE
         * attribute; copying the two chunks back-to-back drops it. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
                                   - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: derive an ETHERTYPE value from the packet
             * type's namespace type. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            nla = nl_attr_find(buf, NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                /* Overwrite the ETHERTYPE value in place in 'buf'. */
                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                /* No ETHERTYPE attribute found; append a fresh one. */
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        /* No PACKET_TYPE attribute: copy 'data' through unchanged. */
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
3482
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'flow'. */
static void
dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
                            struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
                          NLM_F_REQUEST | flow->nlmsg_flags,
                          flow->cmd, OVS_FLOW_VERSION);

    /* Fixed-size OVS header precedes the attribute list. */
    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = flow->dp_ifindex;

    if (flow->ufid_present) {
        nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid);
    }
    if (flow->ufid_terse) {
        /* Ask the kernel to omit key, mask, and actions from its reply. */
        nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
                       OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
                       | OVS_UFID_F_OMIT_ACTIONS);
    }
    /* Unless the flow is fully identified by a terse UFID, serialize its
     * key, mask, and actions (PACKET_TYPE is stripped from key/mask). */
    if (!flow->ufid_terse || !flow->ufid_present) {
        if (flow->key_len) {
            put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key,
                                    flow->key_len);
        }
        if (flow->mask_len) {
            put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask,
                                    flow->mask_len);
        }
        if (flow->actions || flow->actions_len) {
            nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
                              flow->actions, flow->actions_len);
        }
    }

    /* We never need to send these to the kernel. */
    ovs_assert(!flow->stats);
    ovs_assert(!flow->tcp_flags);
    ovs_assert(!flow->used);

    if (flow->clear) {
        nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
    }
    if (flow->probe) {
        nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
    }
}
3533
3534 /* Clears 'flow' to "empty" values. */
3535 static void
3536 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
3537 {
3538 memset(flow, 0, sizeof *flow);
3539 }
3540
3541 /* Executes 'request' in the kernel datapath. If the command fails, returns a
3542 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
3543 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
3544 * result of the command is expected to be a flow also, which is decoded and
3545 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
3546 * is no longer needed ('reply' will contain pointers into '*bufp'). */
3547 static int
3548 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
3549 struct dpif_netlink_flow *reply,
3550 struct ofpbuf **bufp)
3551 {
3552 struct ofpbuf *request_buf;
3553 int error;
3554
3555 ovs_assert((reply != NULL) == (bufp != NULL));
3556
3557 if (reply) {
3558 request->nlmsg_flags |= NLM_F_ECHO;
3559 }
3560
3561 request_buf = ofpbuf_new(1024);
3562 dpif_netlink_flow_to_ofpbuf(request, request_buf);
3563 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
3564 ofpbuf_delete(request_buf);
3565
3566 if (reply) {
3567 if (!error) {
3568 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
3569 }
3570 if (error) {
3571 dpif_netlink_flow_init(reply);
3572 ofpbuf_delete(*bufp);
3573 *bufp = NULL;
3574 }
3575 }
3576 return error;
3577 }
3578
3579 static void
3580 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
3581 struct dpif_flow_stats *stats)
3582 {
3583 if (flow->stats) {
3584 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
3585 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
3586 } else {
3587 stats->n_packets = 0;
3588 stats->n_bytes = 0;
3589 }
3590 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
3591 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
3592 }
3593
3594 /* Logs information about a packet that was recently lost in 'ch' (in
3595 * 'dpif_'). */
3596 static void
3597 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
3598 uint32_t handler_id)
3599 {
3600 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
3601 struct ds s;
3602
3603 if (VLOG_DROP_WARN(&rl)) {
3604 return;
3605 }
3606
3607 ds_init(&s);
3608 if (ch->last_poll != LLONG_MIN) {
3609 ds_put_format(&s, " (last polled %lld ms ago)",
3610 time_msec() - ch->last_poll);
3611 }
3612
3613 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
3614 dpif_name(&dpif->dpif), ch_idx, handler_id);
3615 ds_destroy(&s);
3616 }