lib/dpif-netlink.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dpif-netlink.h"
20
21 #include <ctype.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <net/if.h>
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
28 #include <poll.h>
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <sys/epoll.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34
35 #include "bitmap.h"
36 #include "dpif-provider.h"
37 #include "dynamic-string.h"
38 #include "flow.h"
39 #include "fat-rwlock.h"
40 #include "netdev.h"
41 #include "netdev-linux.h"
42 #include "netdev-vport.h"
43 #include "netlink-conntrack.h"
44 #include "netlink-notifier.h"
45 #include "netlink-socket.h"
46 #include "netlink.h"
47 #include "odp-util.h"
48 #include "ofpbuf.h"
49 #include "packets.h"
50 #include "poll-loop.h"
51 #include "random.h"
52 #include "shash.h"
53 #include "sset.h"
54 #include "timeval.h"
55 #include "unaligned.h"
56 #include "util.h"
57 #include "openvswitch/vlog.h"
58
59 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
60 #ifdef _WIN32
61 enum { WINDOWS = 1 };
62 #else
63 enum { WINDOWS = 0 };
64 #endif
65 enum { MAX_PORTS = USHRT_MAX };
66
67 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
68 * missing if we have old headers. */
69 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
70
71 struct dpif_netlink_dp {
72 /* Generic Netlink header. */
73 uint8_t cmd;
74
75 /* struct ovs_header. */
76 int dp_ifindex;
77
78 /* Attributes. */
79 const char *name; /* OVS_DP_ATTR_NAME. */
80 const uint32_t *upcall_pid; /* OVS_DP_ATTR_UPCALL_PID. */
81 uint32_t user_features; /* OVS_DP_ATTR_USER_FEATURES */
82 const struct ovs_dp_stats *stats; /* OVS_DP_ATTR_STATS. */
83 const struct ovs_dp_megaflow_stats *megaflow_stats;
84 /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
85 };
86
87 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
88 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
89 const struct ofpbuf *);
90 static void dpif_netlink_dp_dump_start(struct nl_dump *);
91 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
92 struct dpif_netlink_dp *reply,
93 struct ofpbuf **bufp);
94 static int dpif_netlink_dp_get(const struct dpif *,
95 struct dpif_netlink_dp *reply,
96 struct ofpbuf **bufp);
97
98 struct dpif_netlink_flow {
99 /* Generic Netlink header. */
100 uint8_t cmd;
101
102 /* struct ovs_header. */
103 unsigned int nlmsg_flags;
104 int dp_ifindex;
105
106 /* Attributes.
107 *
108 * The 'stats' member points to 64-bit data that might only be aligned on
109 * 32-bit boundaries, so get_unaligned_u64() should be used to access its
110 * values.
111 *
112 * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
113 * the Netlink version of the command, even if actions_len is zero. */
114 const struct nlattr *key; /* OVS_FLOW_ATTR_KEY. */
115 size_t key_len;
116 const struct nlattr *mask; /* OVS_FLOW_ATTR_MASK. */
117 size_t mask_len;
118 const struct nlattr *actions; /* OVS_FLOW_ATTR_ACTIONS. */
119 size_t actions_len;
120 ovs_u128 ufid; /* OVS_FLOW_ATTR_FLOW_ID. */
121 bool ufid_present; /* Is there a UFID? */
122 bool ufid_terse; /* Skip serializing key/mask/acts? */
123 const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
124 const uint8_t *tcp_flags; /* OVS_FLOW_ATTR_TCP_FLAGS. */
125 const ovs_32aligned_u64 *used; /* OVS_FLOW_ATTR_USED. */
126 bool clear; /* OVS_FLOW_ATTR_CLEAR. */
127 bool probe; /* OVS_FLOW_ATTR_PROBE. */
128 };
129
130 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
131 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
132 const struct ofpbuf *);
133 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
134 struct ofpbuf *);
135 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
136 struct dpif_netlink_flow *reply,
137 struct ofpbuf **bufp);
138 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
139 struct dpif_flow_stats *);
140 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
141 const struct dpif_netlink_flow *);
142
143 /* One of the dpif channels between the kernel and userspace. */
144 struct dpif_channel {
145 struct nl_sock *sock; /* Netlink socket. */
146 long long int last_poll; /* Last time this channel was polled. */
147 };
148
149 #ifdef _WIN32
150 #define VPORT_SOCK_POOL_SIZE 1
151 /* On Windows, there is no native support for epoll. There are equivalent
152  * interfaces, though they are not currently used. For simplicity, a pool of
153 * netlink sockets is used. Each socket is represented by 'struct
154 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
155 * sharing the same socket. In the future, we can add a reference count and
156 * such fields. */
157 struct dpif_windows_vport_sock {
158 struct nl_sock *nl_sock; /* netlink socket. */
159 };
160 #endif
161
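/* Per-thread state for one upcall handler.  'channels' is indexed by datapath
 * port number; 'epoll_fd' (not used on Windows) aggregates the channels'
 * Netlink sockets for polling. */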
162 struct dpif_handler {
163 struct dpif_channel *channels;/* Array of channels for each handler. */
164 struct epoll_event *epoll_events;
165 int epoll_fd; /* epoll fd that includes channel socks. */
166 int n_events; /* Num events returned by epoll_wait(). */
167 int event_offset; /* Offset into 'epoll_events'. */
168
169 #ifdef _WIN32
170 /* Pool of sockets. */
171 struct dpif_windows_vport_sock *vport_sock_pool;
172 size_t last_used_pool_idx; /* Index to aid in allocating a
173 socket in the pool to a port. */
174 #endif
175 };
176
177 /* Datapath interface for the openvswitch Linux kernel module. */
178 struct dpif_netlink {
179 struct dpif dpif;
180 int dp_ifindex;
181
182 /* Upcall messages. */
183 struct fat_rwlock upcall_lock;
184 struct dpif_handler *handlers;
185 uint32_t n_handlers; /* Num of upcall handlers. */
186 int uc_array_size; /* Size of 'handler->channels' and */
187 /* 'handler->epoll_events'. */
188
189 /* Change notification. */
190 struct nl_sock *port_notifier; /* vport multicast group subscriber. */
191 bool refresh_channels;
192 };
193
194 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
195 uint32_t ch_idx, uint32_t handler_id);
196
197 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
198
199 /* Generic Netlink family numbers for OVS.
200 *
201 * Initialized by dpif_netlink_init(). */
202 static int ovs_datapath_family;
203 static int ovs_vport_family;
204 static int ovs_flow_family;
205 static int ovs_packet_family;
206
207 /* Generic Netlink multicast groups for OVS.
208 *
209 * Initialized by dpif_netlink_init(). */
210 static unsigned int ovs_vport_mcgroup;
211
212 static int dpif_netlink_init(void);
213 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
214 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
215 odp_port_t port_no, uint32_t hash);
216 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
217 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
218 uint32_t n_handlers);
219 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
220 struct ofpbuf *);
221 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
222 const struct ofpbuf *);
223
224 static struct dpif_netlink *
225 dpif_netlink_cast(const struct dpif *dpif)
226 {
227 dpif_assert_class(dpif, &dpif_netlink_class);
228 return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
229 }
230
231 static int
232 dpif_netlink_enumerate(struct sset *all_dps,
233 const struct dpif_class *dpif_class OVS_UNUSED)
234 {
235 struct nl_dump dump;
236 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
237 struct ofpbuf msg, buf;
238 int error;
239
240 error = dpif_netlink_init();
241 if (error) {
242 return error;
243 }
244
245 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
246 dpif_netlink_dp_dump_start(&dump);
247 while (nl_dump_next(&dump, &msg, &buf)) {
248 struct dpif_netlink_dp dp;
249
250 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
251 sset_add(all_dps, dp.name);
252 }
253 }
254 ofpbuf_uninit(&buf);
255 return nl_dump_done(&dump);
256 }
257
258 static int
259 dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
260 bool create, struct dpif **dpifp)
261 {
262 struct dpif_netlink_dp dp_request, dp;
263 struct ofpbuf *buf;
264 uint32_t upcall_pid;
265 int error;
266
267 error = dpif_netlink_init();
268 if (error) {
269 return error;
270 }
271
272 /* Create or look up datapath. */
273 dpif_netlink_dp_init(&dp_request);
274 if (create) {
275 dp_request.cmd = OVS_DP_CMD_NEW;
276 upcall_pid = 0;
277 dp_request.upcall_pid = &upcall_pid;
278 } else {
279 /* Use OVS_DP_CMD_SET to report user features */
280 dp_request.cmd = OVS_DP_CMD_SET;
281 }
282 dp_request.name = name;
283 dp_request.user_features |= OVS_DP_F_UNALIGNED;
284 dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
285 error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
286 if (error) {
287 return error;
288 }
289
290 error = open_dpif(&dp, dpifp);
291 ofpbuf_delete(buf);
292 return error;
293 }
294
295 static int
296 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
297 {
298 struct dpif_netlink *dpif;
299
300 dpif = xzalloc(sizeof *dpif);
301 dpif->port_notifier = NULL;
302 fat_rwlock_init(&dpif->upcall_lock);
303
304 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
305 dp->dp_ifindex, dp->dp_ifindex);
306
307 dpif->dp_ifindex = dp->dp_ifindex;
308 *dpifp = &dpif->dpif;
309
310 return 0;
311 }
312
313 /* Destroys the netlink sockets pointed to by the elements in 'socksp'
314  * and frees 'socksp' itself. */
315 static void
316 vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
317 {
318 size_t i;
319
320 for (i = 0; i < n_socks; i++) {
321 nl_sock_destroy(socksp[i]);
322 }
323
324 free(socksp);
325 }
326
327 /* Creates 'n_socks' netlink sockets and returns an array of pointers to
328  * them.  On failure, records the error in '*error' and returns NULL. */
329 static struct nl_sock **
330 vport_create_socksp__(uint32_t n_socks, int *error)
331 {
332 struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
333 size_t i;
334
335 for (i = 0; i < n_socks; i++) {
336 *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
337 if (*error) {
338 goto error;
339 }
340 }
341
342 return socksp;
343
344 error:
345 vport_del_socksp__(socksp, n_socks);
346
347 return NULL;
348 }
349
350 #ifdef _WIN32
351 static void
352 vport_delete_sock_pool(struct dpif_handler *handler)
353 OVS_REQ_WRLOCK(dpif->upcall_lock)
354 {
355 if (handler->vport_sock_pool) {
356 uint32_t i;
357 struct dpif_windows_vport_sock *sock_pool =
358 handler->vport_sock_pool;
359
360 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
361 if (sock_pool[i].nl_sock) {
362 nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
363 nl_sock_destroy(sock_pool[i].nl_sock);
364 sock_pool[i].nl_sock = NULL;
365 }
366 }
367
368 free(handler->vport_sock_pool);
369 handler->vport_sock_pool = NULL;
370 }
371 }
372
373 static int
374 vport_create_sock_pool(struct dpif_handler *handler)
375 OVS_REQ_WRLOCK(dpif->upcall_lock)
376 {
377 struct dpif_windows_vport_sock *sock_pool;
378 size_t i;
379 int error = 0;
380
381 sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
382 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
383 error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
384 if (error) {
385 goto error;
386 }
387
388 /* Enable the netlink socket to receive packets. This is equivalent to
389 * calling nl_sock_join_mcgroup() to receive events. */
390 error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
391 if (error) {
392 goto error;
393 }
394 }
395
396 handler->vport_sock_pool = sock_pool;
397 handler->last_used_pool_idx = 0;
398 return 0;
399
400 error:
401 vport_delete_sock_pool(handler);
402 return error;
403 }
404
405 /* Returns an array of pointers to netlink sockets picked from each handler's
406  * pool.  On failure, records the error in '*error' and returns NULL. */
407 static struct nl_sock **
408 vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
409 OVS_REQ_WRLOCK(dpif->upcall_lock)
410 {
411 uint32_t n_socks = dpif->n_handlers;
412 struct nl_sock **socksp;
413 size_t i;
414
415 ovs_assert(n_socks <= 1);
416 socksp = xzalloc(n_socks * sizeof *socksp);
417
418 /* Pick netlink sockets to use in a round-robin fashion from each
419 * handler's pool of sockets. */
420 for (i = 0; i < n_socks; i++) {
421 struct dpif_handler *handler = &dpif->handlers[i];
422 struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
423 size_t index = handler->last_used_pool_idx;
424
425 /* A pool of sockets is allocated when the handler is initialized. */
426 if (sock_pool == NULL) {
427 free(socksp);
428 *error = EINVAL;
429 return NULL;
430 }
431
432 ovs_assert(index < VPORT_SOCK_POOL_SIZE);
433 socksp[i] = sock_pool[index].nl_sock;
435 ovs_assert(socksp[i]);
436 index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
437 handler->last_used_pool_idx = index;
438 }
439
440 return socksp;
441 }
442
443 static void
444 vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
445 {
446 free(socksp);
447 }
448 #endif /* _WIN32 */
449
450 static struct nl_sock **
451 vport_create_socksp(struct dpif_netlink *dpif, int *error)
452 {
453 #ifdef _WIN32
454 return vport_create_socksp_windows(dpif, error);
455 #else
456 return vport_create_socksp__(dpif->n_handlers, error);
457 #endif
458 }
459
460 static void
461 vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
462 {
463 #ifdef _WIN32
464 vport_del_socksp_windows(dpif, socksp);
465 #else
466 vport_del_socksp__(socksp, dpif->n_handlers);
467 #endif
468 }
469
470 /* Given the array of pointers to netlink sockets 'socksp', returns
471  * an array of the corresponding pids.  If 'socksp' is NULL, returns
472  * a single-element array containing 0. */
473 static uint32_t *
474 vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
475 {
476 uint32_t *pids;
477
478 if (!socksp) {
479 pids = xzalloc(sizeof *pids);
480 } else {
481 size_t i;
482
483 pids = xzalloc(n_socks * sizeof *pids);
484 for (i = 0; i < n_socks; i++) {
485 pids[i] = nl_sock_pid(socksp[i]);
486 }
487 }
488
489 return pids;
490 }
491
492 /* Given the port number 'port_idx', extracts the pids of the netlink sockets
493  * associated with the port and stores them in '*upcall_pids'. */
494 static bool
495 vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
496 uint32_t **upcall_pids)
497 {
498 uint32_t *pids;
499 size_t i;
500
501 /* Since a socket is assigned to either all or none of the
502  * "dpif->handlers" channels, checking the first handler
503  * suffices. */
504 if (!dpif->handlers[0].channels[port_idx].sock) {
505 return false;
506 }
507 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
508
509 pids = xzalloc(dpif->n_handlers * sizeof *pids);
510
511 for (i = 0; i < dpif->n_handlers; i++) {
512 pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
513 }
514
515 *upcall_pids = pids;
516
517 return true;
518 }
519
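/* Adds a channel for 'port_no' to every handler in 'dpif': grows the per-port
 * arrays if necessary, registers socksp[i] with handler i's epoll set, and
 * records the socket in that handler's channel for the port.  On failure,
 * rolls back the channels already added and returns a positive errno. */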
520 static int
521 vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
522 struct nl_sock **socksp)
523 {
524 struct epoll_event event;
525 uint32_t port_idx = odp_to_u32(port_no);
526 size_t i, j;
527 int error;
528
529 if (dpif->handlers == NULL) {
530 return 0;
531 }
532
533 /* We assume that the datapath densely chooses port numbers, which can
534 * therefore be used as an index into 'channels' and 'epoll_events' of
535 * 'dpif->handler'. */
536 if (port_idx >= dpif->uc_array_size) {
537 uint32_t new_size = port_idx + 1;
538
539 if (new_size > MAX_PORTS) {
540 VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
541 dpif_name(&dpif->dpif), port_no);
542 return EFBIG;
543 }
544
545 for (i = 0; i < dpif->n_handlers; i++) {
546 struct dpif_handler *handler = &dpif->handlers[i];
547
548 handler->channels = xrealloc(handler->channels,
549 new_size * sizeof *handler->channels);
550
551 for (j = dpif->uc_array_size; j < new_size; j++) {
552 handler->channels[j].sock = NULL;
553 }
554
555 handler->epoll_events = xrealloc(handler->epoll_events,
556 new_size * sizeof *handler->epoll_events);
557
558 }
559 dpif->uc_array_size = new_size;
560 }
561
562 memset(&event, 0, sizeof event);
563 event.events = EPOLLIN;
564 event.data.u32 = port_idx;
565
566 for (i = 0; i < dpif->n_handlers; i++) {
567 struct dpif_handler *handler = &dpif->handlers[i];
568
569 #ifndef _WIN32
570 if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
571 &event) < 0) {
572 error = errno;
573 goto error;
574 }
575 #endif
576 dpif->handlers[i].channels[port_idx].sock = socksp[i];
577 dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
578 }
579
580 return 0;
581
582 error:
583 for (j = 0; j < i; j++) {
584 #ifndef _WIN32
585 epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
586 nl_sock_fd(socksp[j]), NULL);
587 #endif
588 dpif->handlers[j].channels[port_idx].sock = NULL;
589 }
590
591 return error;
592 }
593
594 static void
595 vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
596 {
597 uint32_t port_idx = odp_to_u32(port_no);
598 size_t i;
599
600 if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
601 return;
602 }
603
604 /* Since the socket is assigned to either all or none of the
605  * "dpif->handlers" channels, checking the first handler
606  * suffices. */
607 if (!dpif->handlers[0].channels[port_idx].sock) {
608 return;
609 }
610
611 for (i = 0; i < dpif->n_handlers; i++) {
612 struct dpif_handler *handler = &dpif->handlers[i];
613 #ifndef _WIN32
614 epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
615 nl_sock_fd(handler->channels[port_idx].sock), NULL);
616 nl_sock_destroy(handler->channels[port_idx].sock);
617 #endif
618 handler->channels[port_idx].sock = NULL;
619 handler->event_offset = handler->n_events = 0;
620 }
621 }
622
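/* Disables upcalls on every port (by setting its upcall pid to 0), then frees
 * all channels, the per-handler epoll state, and the handler array itself. */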
623 static void
624 destroy_all_channels(struct dpif_netlink *dpif)
625 OVS_REQ_WRLOCK(dpif->upcall_lock)
626 {
627 unsigned int i;
628
629 if (!dpif->handlers) {
630 return;
631 }
632
633 for (i = 0; i < dpif->uc_array_size; i++ ) {
634 struct dpif_netlink_vport vport_request;
635 uint32_t upcall_pids = 0;
636
637 /* Since the socket is assigned to either all or none of the
638  * "dpif->handlers" channels, checking the first handler
639  * suffices. */
640 if (!dpif->handlers[0].channels[i].sock) {
641 continue;
642 }
643
644 /* Turn off upcalls. */
645 dpif_netlink_vport_init(&vport_request);
646 vport_request.cmd = OVS_VPORT_CMD_SET;
647 vport_request.dp_ifindex = dpif->dp_ifindex;
648 vport_request.port_no = u32_to_odp(i);
649 vport_request.n_upcall_pids = 1;
650 vport_request.upcall_pids = &upcall_pids;
651 dpif_netlink_vport_transact(&vport_request, NULL, NULL);
652
653 vport_del_channels(dpif, u32_to_odp(i));
654 }
655
656 for (i = 0; i < dpif->n_handlers; i++) {
657 struct dpif_handler *handler = &dpif->handlers[i];
658
659 dpif_netlink_handler_uninit(handler);
660 free(handler->epoll_events);
661 free(handler->channels);
662 }
663
664 free(dpif->handlers);
665 dpif->handlers = NULL;
666 dpif->n_handlers = 0;
667 dpif->uc_array_size = 0;
668 }
669
670 static void
671 dpif_netlink_close(struct dpif *dpif_)
672 {
673 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
674
675 nl_sock_destroy(dpif->port_notifier);
676
677 fat_rwlock_wrlock(&dpif->upcall_lock);
678 destroy_all_channels(dpif);
679 fat_rwlock_unlock(&dpif->upcall_lock);
680
681 fat_rwlock_destroy(&dpif->upcall_lock);
682 free(dpif);
683 }
684
685 static int
686 dpif_netlink_destroy(struct dpif *dpif_)
687 {
688 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
689 struct dpif_netlink_dp dp;
690
691 dpif_netlink_dp_init(&dp);
692 dp.cmd = OVS_DP_CMD_DEL;
693 dp.dp_ifindex = dpif->dp_ifindex;
694 return dpif_netlink_dp_transact(&dp, NULL, NULL);
695 }
696
697 static bool
698 dpif_netlink_run(struct dpif *dpif_)
699 {
700 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
701
702 if (dpif->refresh_channels) {
703 dpif->refresh_channels = false;
704 fat_rwlock_wrlock(&dpif->upcall_lock);
705 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
706 fat_rwlock_unlock(&dpif->upcall_lock);
707 }
708 return false;
709 }
710
711 static int
712 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
713 {
714 struct dpif_netlink_dp dp;
715 struct ofpbuf *buf;
716 int error;
717
718 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
719 if (!error) {
720 memset(stats, 0, sizeof *stats);
721
722 if (dp.stats) {
723 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
724 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
725 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
726 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
727 }
728
729 if (dp.megaflow_stats) {
730 stats->n_masks = dp.megaflow_stats->n_masks;
731 stats->n_mask_hit = get_32aligned_u64(
732 &dp.megaflow_stats->n_mask_hit);
733 } else {
734 stats->n_masks = UINT32_MAX;
735 stats->n_mask_hit = UINT64_MAX;
736 }
737 ofpbuf_delete(buf);
738 }
739 return error;
740 }
741
742 static const char *
743 get_vport_type(const struct dpif_netlink_vport *vport)
744 {
745 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
746
747 switch (vport->type) {
748 case OVS_VPORT_TYPE_NETDEV: {
749 const char *type = netdev_get_type_from_name(vport->name);
750
751 return type ? type : "system";
752 }
753
754 case OVS_VPORT_TYPE_INTERNAL:
755 return "internal";
756
757 case OVS_VPORT_TYPE_GENEVE:
758 return "geneve";
759
760 case OVS_VPORT_TYPE_GRE:
761 return "gre";
762
763 case OVS_VPORT_TYPE_VXLAN:
764 return "vxlan";
765
766 case OVS_VPORT_TYPE_LISP:
767 return "lisp";
768
769 case OVS_VPORT_TYPE_STT:
770 return "stt";
771
772 case OVS_VPORT_TYPE_UNSPEC:
773 case __OVS_VPORT_TYPE_MAX:
774 break;
775 }
776
777 VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
778 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
779 return "unknown";
780 }
781
782 static enum ovs_vport_type
783 netdev_to_ovs_vport_type(const struct netdev *netdev)
784 {
785 const char *type = netdev_get_type(netdev);
786
787 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
788 return OVS_VPORT_TYPE_NETDEV;
789 } else if (!strcmp(type, "internal")) {
790 return OVS_VPORT_TYPE_INTERNAL;
791 } else if (strstr(type, "stt")) {
792 return OVS_VPORT_TYPE_STT;
793 } else if (!strcmp(type, "geneve")) {
794 return OVS_VPORT_TYPE_GENEVE;
795 } else if (strstr(type, "gre")) {
796 return OVS_VPORT_TYPE_GRE;
797 } else if (!strcmp(type, "vxlan")) {
798 return OVS_VPORT_TYPE_VXLAN;
799 } else if (!strcmp(type, "lisp")) {
800 return OVS_VPORT_TYPE_LISP;
801 } else {
802 return OVS_VPORT_TYPE_UNSPEC;
803 }
804 }
805
806 static int
807 dpif_netlink_port_add__(struct dpif_netlink *dpif, struct netdev *netdev,
808 odp_port_t *port_nop)
809 OVS_REQ_WRLOCK(dpif->upcall_lock)
810 {
811 const struct netdev_tunnel_config *tnl_cfg;
812 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
813 const char *name = netdev_vport_get_dpif_port(netdev,
814 namebuf, sizeof namebuf);
815 const char *type = netdev_get_type(netdev);
816 struct dpif_netlink_vport request, reply;
817 struct ofpbuf *buf;
818 uint64_t options_stub[64 / 8];
819 struct ofpbuf options;
820 struct nl_sock **socksp = NULL;
821 uint32_t *upcall_pids;
822 int error = 0;
823
824 if (dpif->handlers) {
825 socksp = vport_create_socksp(dpif, &error);
826 if (!socksp) {
827 return error;
828 }
829 }
830
831 dpif_netlink_vport_init(&request);
832 request.cmd = OVS_VPORT_CMD_NEW;
833 request.dp_ifindex = dpif->dp_ifindex;
834 request.type = netdev_to_ovs_vport_type(netdev);
835 if (request.type == OVS_VPORT_TYPE_UNSPEC) {
836 VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
837 "unsupported type `%s'",
838 dpif_name(&dpif->dpif), name, type);
839 vport_del_socksp(dpif, socksp);
840 return EINVAL;
841 }
842 request.name = name;
843
844 if (request.type == OVS_VPORT_TYPE_NETDEV) {
845 #ifdef _WIN32
846 /* XXX: Map the appropriate Windows handle. */
847 #else
848 netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
849 #endif
850 }
851
852 tnl_cfg = netdev_get_tunnel_config(netdev);
853 if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
854 ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
855 if (tnl_cfg->dst_port) {
856 nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
857 ntohs(tnl_cfg->dst_port));
858 }
859 if (tnl_cfg->exts) {
860 size_t ext_ofs;
861 int i;
862
863 ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
864 for (i = 0; i < 32; i++) {
865 if (tnl_cfg->exts & (1 << i)) {
866 nl_msg_put_flag(&options, i);
867 }
868 }
869 nl_msg_end_nested(&options, ext_ofs);
870 }
871 request.options = options.data;
872 request.options_len = options.size;
873 }
874
875 request.port_no = *port_nop;
876 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
877 request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
878 request.upcall_pids = upcall_pids;
879
880 error = dpif_netlink_vport_transact(&request, &reply, &buf);
881 if (!error) {
882 *port_nop = reply.port_no;
883 } else {
884 if (error == EBUSY && *port_nop != ODPP_NONE) {
885 VLOG_INFO("%s: requested port %"PRIu32" is in use",
886 dpif_name(&dpif->dpif), *port_nop);
887 }
888
889 vport_del_socksp(dpif, socksp);
890 goto exit;
891 }
892
893 if (socksp) {
894 error = vport_add_channels(dpif, *port_nop, socksp);
895 if (error) {
896 VLOG_INFO("%s: could not add channel for port %s",
897 dpif_name(&dpif->dpif), name);
898
899 /* Delete the port. */
900 dpif_netlink_vport_init(&request);
901 request.cmd = OVS_VPORT_CMD_DEL;
902 request.dp_ifindex = dpif->dp_ifindex;
903 request.port_no = *port_nop;
904 dpif_netlink_vport_transact(&request, NULL, NULL);
905 vport_del_socksp(dpif, socksp);
906 goto exit;
907 }
908 }
909 free(socksp);
910
911 exit:
912 ofpbuf_delete(buf);
913 free(upcall_pids);
914
915 return error;
916 }
917
918 static int
919 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
920 odp_port_t *port_nop)
921 {
922 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
923 int error;
924
925 fat_rwlock_wrlock(&dpif->upcall_lock);
926 error = dpif_netlink_port_add__(dpif, netdev, port_nop);
927 fat_rwlock_unlock(&dpif->upcall_lock);
928
929 return error;
930 }
931
932 static int
933 dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
934 OVS_REQ_WRLOCK(dpif->upcall_lock)
935 {
936 struct dpif_netlink_vport vport;
937 int error;
938
939 dpif_netlink_vport_init(&vport);
940 vport.cmd = OVS_VPORT_CMD_DEL;
941 vport.dp_ifindex = dpif->dp_ifindex;
942 vport.port_no = port_no;
943 error = dpif_netlink_vport_transact(&vport, NULL, NULL);
944
945 vport_del_channels(dpif, port_no);
946
947 return error;
948 }
949
950 static int
951 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
952 {
953 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
954 int error;
955
956 fat_rwlock_wrlock(&dpif->upcall_lock);
957 error = dpif_netlink_port_del__(dpif, port_no);
958 fat_rwlock_unlock(&dpif->upcall_lock);
959
960 return error;
961 }
962
963 static int
964 dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
965 const char *port_name, struct dpif_port *dpif_port)
966 {
967 struct dpif_netlink_vport request;
968 struct dpif_netlink_vport reply;
969 struct ofpbuf *buf;
970 int error;
971
972 dpif_netlink_vport_init(&request);
973 request.cmd = OVS_VPORT_CMD_GET;
974 request.dp_ifindex = dpif->dp_ifindex;
975 request.port_no = port_no;
976 request.name = port_name;
977
978 error = dpif_netlink_vport_transact(&request, &reply, &buf);
979 if (!error) {
980 if (reply.dp_ifindex != request.dp_ifindex) {
981 /* A query by name reported that 'port_name' is in some datapath
982 * other than 'dpif', but the caller wants to know about 'dpif'. */
983 error = ENODEV;
984 } else if (dpif_port) {
985 dpif_port->name = xstrdup(reply.name);
986 dpif_port->type = xstrdup(get_vport_type(&reply));
987 dpif_port->port_no = reply.port_no;
988 }
989 ofpbuf_delete(buf);
990 }
991 return error;
992 }
993
994 static int
995 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
996 struct dpif_port *dpif_port)
997 {
998 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
999
1000 return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
1001 }
1002
1003 static int
1004 dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
1005 struct dpif_port *dpif_port)
1006 {
1007 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1008
1009 return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
1010 }
1011
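/* Returns the Netlink pid that the kernel should use for upcalls on
 * 'port_no': 'hash' selects the handler, and the port number selects the
 * channel within that handler.  Returns 0 if upcalls are not enabled or the
 * channel's socket has gone away. */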
1012 static uint32_t
1013 dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
1014 odp_port_t port_no, uint32_t hash)
1015 OVS_REQ_RDLOCK(dpif->upcall_lock)
1016 {
1017 uint32_t port_idx = odp_to_u32(port_no);
1018 uint32_t pid = 0;
1019
1020 if (dpif->handlers && dpif->uc_array_size > 0) {
1021 /* The ODPP_NONE "reserved" port number falls back to channel 0 (the
1022  * "ovs-system" local port's channel), since it is not heavily loaded. */
1023 uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
1024 struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
1025
1026 /* The socket pointer must be rechecked because it can change between
1027  * acquisitions of 'upcall_lock'.  A known case is the main thread
1028  * deleting the vport while a handler thread is still handling an
1029  * upcall from that port. */
1030 if (h->channels[idx].sock) {
1031 pid = nl_sock_pid(h->channels[idx].sock);
1032 }
1033 }
1034
1035 return pid;
1036 }
1037
1038 static uint32_t
1039 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
1040 uint32_t hash)
1041 {
1042 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1043 uint32_t ret;
1044
1045 fat_rwlock_rdlock(&dpif->upcall_lock);
1046 ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
1047 fat_rwlock_unlock(&dpif->upcall_lock);
1048
1049 return ret;
1050 }
1051
1052 static int
1053 dpif_netlink_flow_flush(struct dpif *dpif_)
1054 {
1055 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1056 struct dpif_netlink_flow flow;
1057
1058 dpif_netlink_flow_init(&flow);
1059 flow.cmd = OVS_FLOW_CMD_DEL;
1060 flow.dp_ifindex = dpif->dp_ifindex;
1061 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1062 }
1063
1064 struct dpif_netlink_port_state {
1065 struct nl_dump dump;
1066 struct ofpbuf buf;
1067 };
1068
1069 static void
1070 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1071 struct nl_dump *dump)
1072 {
1073 struct dpif_netlink_vport request;
1074 struct ofpbuf *buf;
1075
1076 dpif_netlink_vport_init(&request);
1077 request.cmd = OVS_VPORT_CMD_GET;
1078 request.dp_ifindex = dpif->dp_ifindex;
1079
1080 buf = ofpbuf_new(1024);
1081 dpif_netlink_vport_to_ofpbuf(&request, buf);
1082 nl_dump_start(dump, NETLINK_GENERIC, buf);
1083 ofpbuf_delete(buf);
1084 }
1085
1086 static int
1087 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1088 {
1089 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1090 struct dpif_netlink_port_state *state;
1091
1092 *statep = state = xmalloc(sizeof *state);
1093 dpif_netlink_port_dump_start__(dpif, &state->dump);
1094
1095 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
1096 return 0;
1097 }
1098
1099 static int
1100 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1101 struct nl_dump *dump,
1102 struct dpif_netlink_vport *vport,
1103 struct ofpbuf *buffer)
1104 {
1105 struct ofpbuf buf;
1106 int error;
1107
1108 if (!nl_dump_next(dump, &buf, buffer)) {
1109 return EOF;
1110 }
1111
1112 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1113 if (error) {
1114 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1115 dpif_name(&dpif->dpif), ovs_strerror(error));
1116 }
1117 return error;
1118 }
1119
1120 static int
1121 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1122 struct dpif_port *dpif_port)
1123 {
1124 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1125 struct dpif_netlink_port_state *state = state_;
1126 struct dpif_netlink_vport vport;
1127 int error;
1128
1129 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1130 &state->buf);
1131 if (error) {
1132 return error;
1133 }
1134 dpif_port->name = CONST_CAST(char *, vport.name);
1135 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1136 dpif_port->port_no = vport.port_no;
1137 return 0;
1138 }
1139
1140 static int
1141 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1142 {
1143 struct dpif_netlink_port_state *state = state_;
1144 int error = nl_dump_done(&state->dump);
1145
1146 ofpbuf_uninit(&state->buf);
1147 free(state);
1148 return error;
1149 }
1150
1151 static int
1152 dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
1153 {
1154 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1155
1156 /* Lazily create the Netlink socket to listen for notifications. */
1157 if (!dpif->port_notifier) {
1158 struct nl_sock *sock;
1159 int error;
1160
1161 error = nl_sock_create(NETLINK_GENERIC, &sock);
1162 if (error) {
1163 return error;
1164 }
1165
1166 error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
1167 if (error) {
1168 nl_sock_destroy(sock);
1169 return error;
1170 }
1171 dpif->port_notifier = sock;
1172
1173 /* We have no idea of the current state so report that everything
1174 * changed. */
1175 return ENOBUFS;
1176 }
1177
1178 for (;;) {
1179 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1180 uint64_t buf_stub[4096 / 8];
1181 struct ofpbuf buf;
1182 int error;
1183
1184 ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
1185 error = nl_sock_recv(dpif->port_notifier, &buf, false);
1186 if (!error) {
1187 struct dpif_netlink_vport vport;
1188
1189 error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
1190 if (!error) {
1191 if (vport.dp_ifindex == dpif->dp_ifindex
1192 && (vport.cmd == OVS_VPORT_CMD_NEW
1193 || vport.cmd == OVS_VPORT_CMD_DEL
1194 || vport.cmd == OVS_VPORT_CMD_SET)) {
1195 VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
1196 dpif->dpif.full_name, vport.name, vport.cmd);
1197 if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
1198 dpif->refresh_channels = true;
1199 }
1200 *devnamep = xstrdup(vport.name);
1201 ofpbuf_uninit(&buf);
1202 return 0;
1203 }
1204 }
1205 } else if (error != EAGAIN) {
1206 VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
1207 ovs_strerror(error));
1208 nl_sock_drain(dpif->port_notifier);
1209 error = ENOBUFS;
1210 }
1211
1212 ofpbuf_uninit(&buf);
1213 if (error) {
1214 return error;
1215 }
1216 }
1217 }
1218
1219 static void
1220 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1221 {
1222 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1223
1224 if (dpif->port_notifier) {
1225 nl_sock_wait(dpif->port_notifier, POLLIN);
1226 } else {
1227 poll_immediate_wake();
1228 }
1229 }
1230
1231 static void
1232 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1233 const ovs_u128 *ufid, bool terse)
1234 {
1235 if (ufid) {
1236 request->ufid = *ufid;
1237 request->ufid_present = true;
1238 } else {
1239 request->ufid_present = false;
1240 }
1241 request->ufid_terse = terse;
1242 }
1243
1244 static void
1245 dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
1246 const struct nlattr *key, size_t key_len,
1247 const ovs_u128 *ufid, bool terse,
1248 struct dpif_netlink_flow *request)
1249 {
1250 dpif_netlink_flow_init(request);
1251 request->cmd = OVS_FLOW_CMD_GET;
1252 request->dp_ifindex = dpif->dp_ifindex;
1253 request->key = key;
1254 request->key_len = key_len;
1255 dpif_netlink_flow_init_ufid(request, ufid, terse);
1256 }
1257
1258 static void
1259 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1260 const struct dpif_flow_get *get,
1261 struct dpif_netlink_flow *request)
1262 {
1263 dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
1264 false, request);
1265 }
1266
1267 static int
1268 dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
1269 const struct nlattr *key, size_t key_len,
1270 const ovs_u128 *ufid, bool terse,
1271 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1272 {
1273 struct dpif_netlink_flow request;
1274
1275 dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
1276 return dpif_netlink_flow_transact(&request, reply, bufp);
1277 }
1278
1279 static int
1280 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1281 const struct dpif_netlink_flow *flow,
1282 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1283 {
1284 return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
1285 flow->ufid_present ? &flow->ufid : NULL,
1286 false, reply, bufp);
1287 }
1288
1289 static void
1290 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1291 const struct dpif_flow_put *put,
1292 struct dpif_netlink_flow *request)
1293 {
1294 static const struct nlattr dummy_action;
1295
1296 dpif_netlink_flow_init(request);
1297 request->cmd = (put->flags & DPIF_FP_CREATE
1298 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1299 request->dp_ifindex = dpif->dp_ifindex;
1300 request->key = put->key;
1301 request->key_len = put->key_len;
1302 request->mask = put->mask;
1303 request->mask_len = put->mask_len;
1304 dpif_netlink_flow_init_ufid(request, put->ufid, false);
1305
1306 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1307 request->actions = (put->actions
1308 ? put->actions
1309 : CONST_CAST(struct nlattr *, &dummy_action));
1310 request->actions_len = put->actions_len;
1311 if (put->flags & DPIF_FP_ZERO_STATS) {
1312 request->clear = true;
1313 }
1314 if (put->flags & DPIF_FP_PROBE) {
1315 request->probe = true;
1316 }
1317 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
1318 }
1319
1320 static void
1321 dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
1322 const struct nlattr *key, size_t key_len,
1323 const ovs_u128 *ufid, bool terse,
1324 struct dpif_netlink_flow *request)
1325 {
1326 dpif_netlink_flow_init(request);
1327 request->cmd = OVS_FLOW_CMD_DEL;
1328 request->dp_ifindex = dpif->dp_ifindex;
1329 request->key = key;
1330 request->key_len = key_len;
1331 dpif_netlink_flow_init_ufid(request, ufid, terse);
1332 }
1333
1334 static void
1335 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1336 const struct dpif_flow_del *del,
1337 struct dpif_netlink_flow *request)
1338 {
1339 dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
1340 del->ufid, del->terse, request);
1341 }
1342
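/* A flow dump that may be shared by multiple dump threads.  'status' records
 * any error hit by a dump thread and is reported when the dump is
 * destroyed. */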
1343 struct dpif_netlink_flow_dump {
1344 struct dpif_flow_dump up;
1345 struct nl_dump nl_dump;
1346 atomic_int status;
1347 };
1348
1349 static struct dpif_netlink_flow_dump *
1350 dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
1351 {
1352 return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
1353 }
1354
1355 static struct dpif_flow_dump *
1356 dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse)
1357 {
1358 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1359 struct dpif_netlink_flow_dump *dump;
1360 struct dpif_netlink_flow request;
1361 struct ofpbuf *buf;
1362
1363 dump = xmalloc(sizeof *dump);
1364 dpif_flow_dump_init(&dump->up, dpif_);
1365
1366 dpif_netlink_flow_init(&request);
1367 request.cmd = OVS_FLOW_CMD_GET;
1368 request.dp_ifindex = dpif->dp_ifindex;
1369 request.ufid_present = false;
1370 request.ufid_terse = terse;
1371
1372 buf = ofpbuf_new(1024);
1373 dpif_netlink_flow_to_ofpbuf(&request, buf);
1374 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
1375 ofpbuf_delete(buf);
1376 atomic_init(&dump->status, 0);
1377 dump->up.terse = terse;
1378
1379 return &dump->up;
1380 }
1381
1382 static int
1383 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1384 {
1385 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1386 unsigned int nl_status = nl_dump_done(&dump->nl_dump);
1387 int dump_status;
1388
1389 /* No other thread has access to 'dump' at this point. */
1390 atomic_read_relaxed(&dump->status, &dump_status);
1391 free(dump);
1392 return dump_status ? dump_status : nl_status;
1393 }
1394
1395 struct dpif_netlink_flow_dump_thread {
1396 struct dpif_flow_dump_thread up;
1397 struct dpif_netlink_flow_dump *dump;
1398 struct dpif_netlink_flow flow;
1399 struct dpif_flow_stats stats;
1400 struct ofpbuf nl_flows; /* Always used to store flows. */
1401 struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. */
1402 };
1403
1404 static struct dpif_netlink_flow_dump_thread *
1405 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
1406 {
1407 return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
1408 }
1409
1410 static struct dpif_flow_dump_thread *
1411 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1412 {
1413 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1414 struct dpif_netlink_flow_dump_thread *thread;
1415
1416 thread = xmalloc(sizeof *thread);
1417 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1418 thread->dump = dump;
1419 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1420 thread->nl_actions = NULL;
1421
1422 return &thread->up;
1423 }
1424
1425 static void
1426 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1427 {
1428 struct dpif_netlink_flow_dump_thread *thread
1429 = dpif_netlink_flow_dump_thread_cast(thread_);
1430
1431 ofpbuf_uninit(&thread->nl_flows);
1432 ofpbuf_delete(thread->nl_actions);
1433 free(thread);
1434 }
1435
1436 static void
1437 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1438 const struct dpif_netlink_flow *datapath_flow)
1439 {
1440 dpif_flow->key = datapath_flow->key;
1441 dpif_flow->key_len = datapath_flow->key_len;
1442 dpif_flow->mask = datapath_flow->mask;
1443 dpif_flow->mask_len = datapath_flow->mask_len;
1444 dpif_flow->actions = datapath_flow->actions;
1445 dpif_flow->actions_len = datapath_flow->actions_len;
1446 dpif_flow->ufid_present = datapath_flow->ufid_present;
1447 dpif_flow->pmd_id = PMD_ID_NULL;
1448 if (datapath_flow->ufid_present) {
1449 dpif_flow->ufid = datapath_flow->ufid;
1450 } else {
1451 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1452 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1453 &dpif_flow->ufid);
1454 }
1455 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
1456 }
1457
1458 static int
1459 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
1460 struct dpif_flow *flows, int max_flows)
1461 {
1462 struct dpif_netlink_flow_dump_thread *thread
1463 = dpif_netlink_flow_dump_thread_cast(thread_);
1464 struct dpif_netlink_flow_dump *dump = thread->dump;
1465 struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
1466 int n_flows;
1467
1468 ofpbuf_delete(thread->nl_actions);
1469 thread->nl_actions = NULL;
1470
1471 n_flows = 0;
1472 while (!n_flows
1473 || (n_flows < max_flows && thread->nl_flows.size)) {
1474 struct dpif_netlink_flow datapath_flow;
1475 struct ofpbuf nl_flow;
1476 int error;
1477
1478 /* Try to grab another flow. */
1479 if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
1480 break;
1481 }
1482
1483 /* Convert the flow to our output format. */
1484 error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
1485 if (error) {
1486 atomic_store_relaxed(&dump->status, error);
1487 break;
1488 }
1489
1490 if (dump->up.terse || datapath_flow.actions) {
1491 /* Common case: we don't want actions, or the flow includes
1492 * actions. */
1493 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1494 &datapath_flow);
1495 } else {
1496 /* Rare case: the flow does not include actions. Retrieve this
1497 * individual flow again to get the actions. */
1498 error = dpif_netlink_flow_get(dpif, &datapath_flow,
1499 &datapath_flow, &thread->nl_actions);
1500 if (error == ENOENT) {
1501 VLOG_DBG("dumped flow disappeared on get");
1502 continue;
1503 } else if (error) {
1504 VLOG_WARN("error fetching dumped flow: %s",
1505 ovs_strerror(error));
1506 atomic_store_relaxed(&dump->status, error);
1507 break;
1508 }
1509
1510 /* Save this flow. Then exit, because we only have one buffer to
1511 * handle this case. */
1512 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1513 &datapath_flow);
1514 break;
1515 }
1516 }
1517 return n_flows;
1518 }
1519
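/* Encodes 'd_exec' as an OVS_PACKET_CMD_EXECUTE request in 'buf': the packet
 * data, a flow key built from the packet metadata, the actions to apply, and
 * optional probe and MRU attributes. */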
1520 static void
1521 dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
1522 struct ofpbuf *buf)
1523 {
1524 struct ovs_header *k_exec;
1525 size_t key_ofs;
1526
1527 ofpbuf_prealloc_tailroom(buf, (64
1528 + dp_packet_size(d_exec->packet)
1529 + ODP_KEY_METADATA_SIZE
1530 + d_exec->actions_len));
1531
1532 nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
1533 OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);
1534
1535 k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
1536 k_exec->dp_ifindex = dp_ifindex;
1537
1538 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
1539 dp_packet_data(d_exec->packet),
1540 dp_packet_size(d_exec->packet));
1541
1542 key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
1543 odp_key_from_pkt_metadata(buf, &d_exec->packet->md);
1544 nl_msg_end_nested(buf, key_ofs);
1545
1546 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
1547 d_exec->actions, d_exec->actions_len);
1548 if (d_exec->probe) {
1549 nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
1550 }
1551 if (d_exec->mtu) {
1552 nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
1553 }
1554 }
1555
1556 /* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
1557 * Returns the number actually executed (at least 1, if 'n_ops' is
1558 * positive). */
1559 static size_t
1560 dpif_netlink_operate__(struct dpif_netlink *dpif,
1561 struct dpif_op **ops, size_t n_ops)
1562 {
1563 enum { MAX_OPS = 50 };
1564
1565 struct op_auxdata {
1566 struct nl_transaction txn;
1567
1568 struct ofpbuf request;
1569 uint64_t request_stub[1024 / 8];
1570
1571 struct ofpbuf reply;
1572 uint64_t reply_stub[1024 / 8];
1573 } auxes[MAX_OPS];
1574
1575 struct nl_transaction *txnsp[MAX_OPS];
1576 size_t i;
1577
1578 n_ops = MIN(n_ops, MAX_OPS);
1579 for (i = 0; i < n_ops; i++) {
1580 struct op_auxdata *aux = &auxes[i];
1581 struct dpif_op *op = ops[i];
1582 struct dpif_flow_put *put;
1583 struct dpif_flow_del *del;
1584 struct dpif_flow_get *get;
1585 struct dpif_netlink_flow flow;
1586
1587 ofpbuf_use_stub(&aux->request,
1588 aux->request_stub, sizeof aux->request_stub);
1589 aux->txn.request = &aux->request;
1590
1591 ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
1592 aux->txn.reply = NULL;
1593
1594 switch (op->type) {
1595 case DPIF_OP_FLOW_PUT:
1596 put = &op->u.flow_put;
1597 dpif_netlink_init_flow_put(dpif, put, &flow);
1598 if (put->stats) {
1599 flow.nlmsg_flags |= NLM_F_ECHO;
1600 aux->txn.reply = &aux->reply;
1601 }
1602 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1603 break;
1604
1605 case DPIF_OP_FLOW_DEL:
1606 del = &op->u.flow_del;
1607 dpif_netlink_init_flow_del(dpif, del, &flow);
1608 if (del->stats) {
1609 flow.nlmsg_flags |= NLM_F_ECHO;
1610 aux->txn.reply = &aux->reply;
1611 }
1612 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1613 break;
1614
1615 case DPIF_OP_EXECUTE:
1616 /* Can't execute a packet that won't fit in a Netlink attribute. */
1617 if (OVS_UNLIKELY(nl_attr_oversized(
1618 dp_packet_size(op->u.execute.packet)))) {
1619 /* Report an error immediately if this is the first operation.
1620 * Otherwise the easiest thing to do is to postpone to the next
1621 * call (when this will be the first operation). */
1622 if (i == 0) {
1623 VLOG_ERR_RL(&error_rl,
1624 "dropping oversized %"PRIu32"-byte packet",
1625 dp_packet_size(op->u.execute.packet));
1626 op->error = ENOBUFS;
1627 return 1;
1628 }
1629 n_ops = i;
1630 } else {
1631 dpif_netlink_encode_execute(dpif->dp_ifindex, &op->u.execute,
1632 &aux->request);
1633 }
1634 break;
1635
1636 case DPIF_OP_FLOW_GET:
1637 get = &op->u.flow_get;
1638 dpif_netlink_init_flow_get(dpif, get, &flow);
1639 aux->txn.reply = get->buffer;
1640 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1641 break;
1642
1643 default:
1644 OVS_NOT_REACHED();
1645 }
1646 }
1647
1648 for (i = 0; i < n_ops; i++) {
1649 txnsp[i] = &auxes[i].txn;
1650 }
1651 nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);
1652
1653 for (i = 0; i < n_ops; i++) {
1654 struct op_auxdata *aux = &auxes[i];
1655 struct nl_transaction *txn = &auxes[i].txn;
1656 struct dpif_op *op = ops[i];
1657 struct dpif_flow_put *put;
1658 struct dpif_flow_del *del;
1659 struct dpif_flow_get *get;
1660
1661 op->error = txn->error;
1662
1663 switch (op->type) {
1664 case DPIF_OP_FLOW_PUT:
1665 put = &op->u.flow_put;
1666 if (put->stats) {
1667 if (!op->error) {
1668 struct dpif_netlink_flow reply;
1669
1670 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1671 txn->reply);
1672 if (!op->error) {
1673 dpif_netlink_flow_get_stats(&reply, put->stats);
1674 }
1675 }
1676 }
1677 break;
1678
1679 case DPIF_OP_FLOW_DEL:
1680 del = &op->u.flow_del;
1681 if (del->stats) {
1682 if (!op->error) {
1683 struct dpif_netlink_flow reply;
1684
1685 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1686 txn->reply);
1687 if (!op->error) {
1688 dpif_netlink_flow_get_stats(&reply, del->stats);
1689 }
1690 }
1691 }
1692 break;
1693
1694 case DPIF_OP_EXECUTE:
1695 break;
1696
1697 case DPIF_OP_FLOW_GET:
1698 get = &op->u.flow_get;
1699 if (!op->error) {
1700 struct dpif_netlink_flow reply;
1701
1702 op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
1703 if (!op->error) {
1704 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
1705 &reply);
1706 }
1707 }
1708 break;
1709
1710 default:
1711 OVS_NOT_REACHED();
1712 }
1713
1714 ofpbuf_uninit(&aux->request);
1715 ofpbuf_uninit(&aux->reply);
1716 }
1717
1718 return n_ops;
1719 }
1720
1721 static void
1722 dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
1723 {
1724 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1725
1726 while (n_ops > 0) {
1727 size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops);
1728 ops += chunk;
1729 n_ops -= chunk;
1730 }
1731 }
1732
1733 #ifdef _WIN32
1734 static void
1735 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1736 {
1737 vport_delete_sock_pool(handler);
1738 }
1739
1740 static int
1741 dpif_netlink_handler_init(struct dpif_handler *handler)
1742 {
1743 return vport_create_sock_pool(handler);
1744 }
1745 #else
1746
1747 static int
1748 dpif_netlink_handler_init(struct dpif_handler *handler)
1749 {
1750 handler->epoll_fd = epoll_create(10);
1751 return handler->epoll_fd < 0 ? errno : 0;
1752 }
1753
1754 static void
1755 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1756 {
1757 close(handler->epoll_fd);
1758 }
1759 #endif
1760
1761 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
1762 * currently in 'dpif' in the kernel, by adding a new set of channels for
1763 * any kernel vport that lacks one and deleting any channels that have no
1764 * backing kernel vports. */
1765 static int
1766 dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
1767 OVS_REQ_WRLOCK(dpif->upcall_lock)
1768 {
1769 unsigned long int *keep_channels;
1770 struct dpif_netlink_vport vport;
1771 size_t keep_channels_nbits;
1772 struct nl_dump dump;
1773 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
1774 struct ofpbuf buf;
1775 int retval = 0;
1776 size_t i;
1777
1778 ovs_assert(!WINDOWS || n_handlers <= 1);
1779 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
1780
1781 if (dpif->n_handlers != n_handlers) {
1782 destroy_all_channels(dpif);
1783 dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
1784 for (i = 0; i < n_handlers; i++) {
1785 int error;
1786 struct dpif_handler *handler = &dpif->handlers[i];
1787
1788 error = dpif_netlink_handler_init(handler);
1789 if (error) {
1790 size_t j;
1791 
1792 for (j = 0; j < i; j++) {
1793 struct dpif_handler *tmp = &dpif->handlers[j];
1794 
1795 dpif_netlink_handler_uninit(tmp);
1796 }
1797 free(dpif->handlers);
1798 dpif->handlers = NULL;
1799
1800 return error;
1801 }
1802 }
1803 dpif->n_handlers = n_handlers;
1804 }
1805
1806 for (i = 0; i < n_handlers; i++) {
1807 struct dpif_handler *handler = &dpif->handlers[i];
1808
1809 handler->event_offset = handler->n_events = 0;
1810 }
1811
1812 keep_channels_nbits = dpif->uc_array_size;
1813 keep_channels = bitmap_allocate(keep_channels_nbits);
1814
1815 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
1816 dpif_netlink_port_dump_start__(dpif, &dump);
1817 while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
1818 uint32_t port_no = odp_to_u32(vport.port_no);
1819 uint32_t *upcall_pids = NULL;
1820 int error;
1821
1822 if (port_no >= dpif->uc_array_size
1823 || !vport_get_pids(dpif, port_no, &upcall_pids)) {
1824 struct nl_sock **socksp = vport_create_socksp(dpif, &error);
1825
1826 if (!socksp) {
1827 goto error;
1828 }
1829
1830 error = vport_add_channels(dpif, vport.port_no, socksp);
1831 if (error) {
1832 VLOG_INFO("%s: could not add channels for port %s",
1833 dpif_name(&dpif->dpif), vport.name);
1834 vport_del_socksp(dpif, socksp);
1835 retval = error;
1836 goto error;
1837 }
1838 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
1839 free(socksp);
1840 }
1841
1842 /* Configure the vport to deliver misses to the handlers' sockets. */
1843 if (vport.upcall_pids[0] == 0
1844 || vport.n_upcall_pids != dpif->n_handlers
1845 || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
1846 *upcall_pids)) {
1847 struct dpif_netlink_vport vport_request;
1848
1849 dpif_netlink_vport_init(&vport_request);
1850 vport_request.cmd = OVS_VPORT_CMD_SET;
1851 vport_request.dp_ifindex = dpif->dp_ifindex;
1852 vport_request.port_no = vport.port_no;
1853 vport_request.n_upcall_pids = dpif->n_handlers;
1854 vport_request.upcall_pids = upcall_pids;
1855 error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
1856 if (error) {
1857 VLOG_WARN_RL(&error_rl,
1858 "%s: failed to set upcall pid on port: %s",
1859 dpif_name(&dpif->dpif), ovs_strerror(error));
1860
1861 if (error != ENODEV && error != ENOENT) {
1862 retval = error;
1863 } else {
1864 /* The vport isn't really there, even though the dump says
1865 * it is. Probably we just hit a race after a port
1866 * disappeared. */
1867 }
1868 goto error;
1869 }
1870 }
1871
1872 if (port_no < keep_channels_nbits) {
1873 bitmap_set1(keep_channels, port_no);
1874 }
1875 free(upcall_pids);
1876 continue;
1877
1878 error:
1879 free(upcall_pids);
1880 vport_del_channels(dpif, vport.port_no);
1881 }
1882 nl_dump_done(&dump);
1883 ofpbuf_uninit(&buf);
1884
1885 /* Discard any saved channels that we didn't reuse. */
1886 for (i = 0; i < keep_channels_nbits; i++) {
1887 if (!bitmap_is_set(keep_channels, i)) {
1888 vport_del_channels(dpif, u32_to_odp(i));
1889 }
1890 }
1891 free(keep_channels);
1892
1893 return retval;
1894 }
1895
1896 static int
1897 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
1898 OVS_REQ_WRLOCK(dpif->upcall_lock)
1899 {
1900 if ((dpif->handlers != NULL) == enable) {
1901 return 0;
1902 } else if (!enable) {
1903 destroy_all_channels(dpif);
1904 return 0;
1905 } else {
1906 return dpif_netlink_refresh_channels(dpif, 1);
1907 }
1908 }
1909
1910 static int
1911 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
1912 {
1913 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1914 int error;
1915
1916 fat_rwlock_wrlock(&dpif->upcall_lock);
1917 error = dpif_netlink_recv_set__(dpif, enable);
1918 fat_rwlock_unlock(&dpif->upcall_lock);
1919
1920 return error;
1921 }
1922
1923 static int
1924 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
1925 {
1926 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1927 int error = 0;
1928
1929 #ifdef _WIN32
1930 /* Multiple upcall handlers will be supported once kernel datapath supports
1931 * it. */
1932 if (n_handlers > 1) {
1933 return error;
1934 }
1935 #endif
1936
1937 fat_rwlock_wrlock(&dpif->upcall_lock);
1938 if (dpif->handlers) {
1939 error = dpif_netlink_refresh_channels(dpif, n_handlers);
1940 }
1941 fat_rwlock_unlock(&dpif->upcall_lock);
1942
1943 return error;
1944 }
1945
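/* Maps a generic 'queue_id' to a Linux tc classid: TC_H_MAKE(1 << 16,
 * queue_id + 1) produces a handle with major number 1 and minor number
 * 'queue_id' + 1, so e.g. queue 0 maps to 1:1.  Queue ids of 0xf000 and
 * above are rejected with EINVAL. */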
1946 static int
1947 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
1948 uint32_t queue_id, uint32_t *priority)
1949 {
1950 if (queue_id < 0xf000) {
1951 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
1952 return 0;
1953 } else {
1954 return EINVAL;
1955 }
1956 }
1957
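/* Parses the Generic Netlink upcall in 'buf' (an OVS_PACKET_CMD_MISS or
 * OVS_PACKET_CMD_ACTION message) into '*upcall' and stores the originating
 * datapath's ifindex in '*dp_ifindex'.  The expected layout is a struct
 * nlmsghdr, a struct genlmsghdr, a struct ovs_header, then Netlink
 * attributes.  '*upcall' keeps pointers into 'buf', so 'buf' must outlive
 * it.  Returns 0 on success, otherwise EINVAL. */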
1958 static int
1959 parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
1960 struct dpif_upcall *upcall, int *dp_ifindex)
1961 {
1962 static const struct nl_policy ovs_packet_policy[] = {
1963 /* Always present. */
1964 [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
1965 .min_len = ETH_HEADER_LEN },
1966 [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },
1967
1968 /* OVS_PACKET_CMD_ACTION only. */
1969 [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
1970 [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
1971 [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
1972 [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true }
1973 };
1974
1975 struct ovs_header *ovs_header;
1976 struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
1977 struct nlmsghdr *nlmsg;
1978 struct genlmsghdr *genl;
1979 struct ofpbuf b;
1980 int type;
1981
1982 ofpbuf_use_const(&b, buf->data, buf->size);
1983
1984 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
1985 genl = ofpbuf_try_pull(&b, sizeof *genl);
1986 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
1987 if (!nlmsg || !genl || !ovs_header
1988 || nlmsg->nlmsg_type != ovs_packet_family
1989 || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
1990 ARRAY_SIZE(ovs_packet_policy))) {
1991 return EINVAL;
1992 }
1993
1994 type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
1995 : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
1996 : -1);
1997 if (type < 0) {
1998 return EINVAL;
1999 }
2000
2001 /* (Re)set ALL fields of '*upcall' on successful return. */
2002 upcall->type = type;
2003 upcall->key = CONST_CAST(struct nlattr *,
2004 nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
2005 upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
2006 dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
2007 upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
2008 upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
2009 upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
2010 upcall->mru = a[OVS_PACKET_ATTR_MRU];
2011
2012 /* Allow overwriting the netlink attribute header without reallocating. */
2013 dp_packet_use_stub(&upcall->packet,
2014 CONST_CAST(struct nlattr *,
2015 nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
2016 nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
2017 sizeof(struct nlattr));
2018 dp_packet_set_data(&upcall->packet,
2019 (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
2020 dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
2021
2022 *dp_ifindex = ovs_header->dp_ifindex;
2023
2024 return 0;
2025 }
2026
2027 #ifdef _WIN32
2028 #define PACKET_RECV_BATCH_SIZE 50
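/* Windows counterpart of dpif_netlink_recv__() below: polls each socket in
 * the handler's fixed vport socket pool in turn, reading at most
 * PACKET_RECV_BATCH_SIZE messages before giving up with EAGAIN. */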
2029 static int
2030 dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
2031 struct dpif_upcall *upcall, struct ofpbuf *buf)
2032 OVS_REQ_RDLOCK(dpif->upcall_lock)
2033 {
2034 struct dpif_handler *handler;
2035 int read_tries = 0;
2036 struct dpif_windows_vport_sock *sock_pool;
2037 uint32_t i;
2038
2039 if (!dpif->handlers) {
2040 return EAGAIN;
2041 }
2042
2043 /* Only one handler is supported currently. */
2044 if (handler_id >= 1) {
2045 return EAGAIN;
2046 }
2047
2048 if (handler_id >= dpif->n_handlers) {
2049 return EAGAIN;
2050 }
2051
2052 handler = &dpif->handlers[handler_id];
2053 sock_pool = handler->vport_sock_pool;
2054
2055 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2056 for (;;) {
2057 int dp_ifindex;
2058 int error;
2059
2060 if (++read_tries > PACKET_RECV_BATCH_SIZE) {
2061 return EAGAIN;
2062 }
2063
2064 error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
2065 if (error == ENOBUFS) {
2066 /* ENOBUFS typically means that we've received so many
2067 * packets that the buffer overflowed. Try again
2068 * immediately because there's almost certainly a packet
2069 * waiting for us. */
2070 /* XXX: report_loss(dpif, ch, idx, handler_id); */
2071 continue;
2072 }
2073
2074 /* XXX: ch->last_poll = time_msec(); */
2075 if (error) {
2076 if (error == EAGAIN) {
2077 break;
2078 }
2079 return error;
2080 }
2081
2082 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2083 if (!error && dp_ifindex == dpif->dp_ifindex) {
2084 return 0;
2085 } else if (error) {
2086 return error;
2087 }
2088 }
2089 }
2090
2091 return EAGAIN;
2092 }
2093 #else
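/* Receives one upcall for 'handler_id' into '*upcall', using 'buf' for
 * storage.  Uses epoll_wait() with a zero timeout to find ready per-port
 * channels, then drains them, retrying on ENOBUFS because a socket buffer
 * overflow almost always means another packet is still queued.  Gives up
 * after 50 reads and returns EAGAIN so other work is not starved; also
 * returns EAGAIN if nothing is ready. */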
2094 static int
2095 dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
2096 struct dpif_upcall *upcall, struct ofpbuf *buf)
2097 OVS_REQ_RDLOCK(dpif->upcall_lock)
2098 {
2099 struct dpif_handler *handler;
2100 int read_tries = 0;
2101
2102 if (!dpif->handlers || handler_id >= dpif->n_handlers) {
2103 return EAGAIN;
2104 }
2105
2106 handler = &dpif->handlers[handler_id];
2107 if (handler->event_offset >= handler->n_events) {
2108 int retval;
2109
2110 handler->event_offset = handler->n_events = 0;
2111
2112 do {
2113 retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
2114 dpif->uc_array_size, 0);
2115 } while (retval < 0 && errno == EINTR);
2116
2117 if (retval < 0) {
2118 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2119 VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
2120 } else if (retval > 0) {
2121 handler->n_events = retval;
2122 }
2123 }
2124
2125 while (handler->event_offset < handler->n_events) {
2126 int idx = handler->epoll_events[handler->event_offset].data.u32;
2127 struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];
2128
2129 handler->event_offset++;
2130
2131 for (;;) {
2132 int dp_ifindex;
2133 int error;
2134
2135 if (++read_tries > 50) {
2136 return EAGAIN;
2137 }
2138
2139 error = nl_sock_recv(ch->sock, buf, false);
2140 if (error == ENOBUFS) {
2141 /* ENOBUFS typically means that we've received so many
2142 * packets that the buffer overflowed. Try again
2143 * immediately because there's almost certainly a packet
2144 * waiting for us. */
2145 report_loss(dpif, ch, idx, handler_id);
2146 continue;
2147 }
2148
2149 ch->last_poll = time_msec();
2150 if (error) {
2151 if (error == EAGAIN) {
2152 break;
2153 }
2154 return error;
2155 }
2156
2157 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2158 if (!error && dp_ifindex == dpif->dp_ifindex) {
2159 return 0;
2160 } else if (error) {
2161 return error;
2162 }
2163 }
2164 }
2165
2166 return EAGAIN;
2167 }
2168 #endif
2169
2170 static int
2171 dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
2172 struct dpif_upcall *upcall, struct ofpbuf *buf)
2173 {
2174 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2175 int error;
2176
2177 fat_rwlock_rdlock(&dpif->upcall_lock);
2178 #ifdef _WIN32
2179 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
2180 #else
2181 error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
2182 #endif
2183 fat_rwlock_unlock(&dpif->upcall_lock);
2184
2185 return error;
2186 }
2187
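/* Arranges for the next poll_block() to wake up when dpif_netlink_recv() can
 * return an upcall for 'handler_id': on Linux this waits on the handler's
 * epoll fd, on Windows on each socket in the handler's vport socket pool.
 * The caller must hold 'dpif->upcall_lock' for reading. */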
2188 static void
2189 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2190 OVS_REQ_RDLOCK(dpif->upcall_lock)
2191 {
2192 #ifdef _WIN32
2193 uint32_t i;
2194 struct dpif_windows_vport_sock *sock_pool =
2195 dpif->handlers[handler_id].vport_sock_pool;
2196
2197 /* Only one handler is supported currently. */
2198 if (handler_id >= 1) {
2199 return;
2200 }
2201
2202 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2203 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2204 }
2205 #else
2206 if (dpif->handlers && handler_id < dpif->n_handlers) {
2207 struct dpif_handler *handler = &dpif->handlers[handler_id];
2208
2209 poll_fd_wait(handler->epoll_fd, POLLIN);
2210 }
2211 #endif
2212 }
2213
2214 static void
2215 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2216 {
2217 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2218
2219 fat_rwlock_rdlock(&dpif->upcall_lock);
2220 dpif_netlink_recv_wait__(dpif, handler_id);
2221 fat_rwlock_unlock(&dpif->upcall_lock);
2222 }
2223
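/* Discards any queued upcalls by draining every handler's per-port Netlink
 * socket.  The caller must hold 'dpif->upcall_lock' for writing. */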
2224 static void
2225 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2226 OVS_REQ_WRLOCK(dpif->upcall_lock)
2227 {
2228 if (dpif->handlers) {
2229 size_t i, j;
2230
2231 for (i = 0; i < dpif->uc_array_size; i++) {
2232 if (!dpif->handlers[0].channels[i].sock) {
2233 continue;
2234 }
2235
2236 for (j = 0; j < dpif->n_handlers; j++) {
2237 nl_sock_drain(dpif->handlers[j].channels[i].sock);
2238 }
2239 }
2240 }
2241 }
2242
2243 static void
2244 dpif_netlink_recv_purge(struct dpif *dpif_)
2245 {
2246 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2247
2248 fat_rwlock_wrlock(&dpif->upcall_lock);
2249 dpif_netlink_recv_purge__(dpif);
2250 fat_rwlock_unlock(&dpif->upcall_lock);
2251 }
2252
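/* Returns the version string of the loaded kernel datapath module, read from
 * /sys/module/openvswitch/version on Linux, or NULL if it cannot be read or
 * on other platforms.  The caller owns the returned string and must free
 * it. */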
2253 static char *
2254 dpif_netlink_get_datapath_version(void)
2255 {
2256 char *version_str = NULL;
2257
2258 #ifdef __linux__
2259
2260 #define MAX_VERSION_STR_SIZE 80
2261 #define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
2262 FILE *f;
2263
2264 f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");
2265 if (f) {
2266 char *newline;
2267 char version[MAX_VERSION_STR_SIZE];
2268
2269 if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
2270 newline = strchr(version, '\n');
2271 if (newline) {
2272 *newline = '\0';
2273 }
2274 version_str = xstrdup(version);
2275 }
2276 fclose(f);
2277 }
2278 #endif
2279
2280 return version_str;
2281 }
2282
2283 #ifdef __linux__
2284 struct dpif_netlink_ct_dump_state {
2285 struct ct_dpif_dump_state up;
2286 struct nl_ct_dump_state *nl_ct_dump;
2287 };
2288
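/* Conntrack dump support.  These thin wrappers adapt the generic ct_dpif
 * dump interface to the nl_ct_dump_start(), nl_ct_dump_next(), and
 * nl_ct_dump_done() helpers, keeping the Netlink dump state alongside the
 * generic state in 'struct dpif_netlink_ct_dump_state'. */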
2289 static int
2290 dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
2291 struct ct_dpif_dump_state **dump_,
2292 const uint16_t *zone)
2293 {
2294 struct dpif_netlink_ct_dump_state *dump;
2295 int err;
2296
2297 dump = xzalloc(sizeof *dump);
2298 err = nl_ct_dump_start(&dump->nl_ct_dump, zone);
2299 if (err) {
2300 free(dump);
2301 return err;
2302 }
2303
2304 *dump_ = &dump->up;
2305
2306 return 0;
2307 }
2308
2309 static int
2310 dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
2311 struct ct_dpif_dump_state *dump_,
2312 struct ct_dpif_entry *entry)
2313 {
2314 struct dpif_netlink_ct_dump_state *dump;
2315
2316 INIT_CONTAINER(dump, dump_, up);
2317
2318 return nl_ct_dump_next(dump->nl_ct_dump, entry);
2319 }
2320
2321 static int
2322 dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
2323 struct ct_dpif_dump_state *dump_)
2324 {
2325 struct dpif_netlink_ct_dump_state *dump;
2326 int err;
2327
2328 INIT_CONTAINER(dump, dump_, up);
2329
2330 err = nl_ct_dump_done(dump->nl_ct_dump);
2331 free(dump);
2332 return err;
2333 }
2334 #endif
2335
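/* The dpif provider structure for the kernel ("system") datapath.  NULL
 * members are optional hooks that this provider does not implement. */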
2336 const struct dpif_class dpif_netlink_class = {
2337 "system",
2338 NULL, /* init */
2339 dpif_netlink_enumerate,
2340 NULL,
2341 dpif_netlink_open,
2342 dpif_netlink_close,
2343 dpif_netlink_destroy,
2344 dpif_netlink_run,
2345 NULL, /* wait */
2346 dpif_netlink_get_stats,
2347 dpif_netlink_port_add,
2348 dpif_netlink_port_del,
2349 dpif_netlink_port_query_by_number,
2350 dpif_netlink_port_query_by_name,
2351 dpif_netlink_port_get_pid,
2352 dpif_netlink_port_dump_start,
2353 dpif_netlink_port_dump_next,
2354 dpif_netlink_port_dump_done,
2355 dpif_netlink_port_poll,
2356 dpif_netlink_port_poll_wait,
2357 dpif_netlink_flow_flush,
2358 dpif_netlink_flow_dump_create,
2359 dpif_netlink_flow_dump_destroy,
2360 dpif_netlink_flow_dump_thread_create,
2361 dpif_netlink_flow_dump_thread_destroy,
2362 dpif_netlink_flow_dump_next,
2363 dpif_netlink_operate,
2364 dpif_netlink_recv_set,
2365 dpif_netlink_handlers_set,
2366 NULL, /* poll_thread_set */
2367 dpif_netlink_queue_to_priority,
2368 dpif_netlink_recv,
2369 dpif_netlink_recv_wait,
2370 dpif_netlink_recv_purge,
2371 NULL, /* register_dp_purge_cb */
2372 NULL, /* register_upcall_cb */
2373 NULL, /* enable_upcall */
2374 NULL, /* disable_upcall */
2375 dpif_netlink_get_datapath_version, /* get_datapath_version */
2376 #ifdef __linux__
2377 dpif_netlink_ct_dump_start,
2378 dpif_netlink_ct_dump_next,
2379 dpif_netlink_ct_dump_done,
2380 #else
2381 NULL, /* ct_dump_start */
2382 NULL, /* ct_dump_next */
2383 NULL, /* ct_dump_done */
2384 #endif
2385 NULL, /* ct_flush */
2386 };
2387
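/* One-time initialization: looks up the Generic Netlink family numbers (and
 * the vport multicast group) registered by the OVS kernel module.  On
 * failure, typically because the module is not loaded, the positive errno is
 * cached and returned by every subsequent call. */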
2388 static int
2389 dpif_netlink_init(void)
2390 {
2391 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2392 static int error;
2393
2394 if (ovsthread_once_start(&once)) {
2395 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
2396 &ovs_datapath_family);
2397 if (error) {
2398 VLOG_ERR("Generic Netlink family '%s' does not exist. "
2399 "The Open vSwitch kernel module is probably not loaded.",
2400 OVS_DATAPATH_FAMILY);
2401 }
2402 if (!error) {
2403 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
2404 }
2405 if (!error) {
2406 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
2407 }
2408 if (!error) {
2409 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
2410 &ovs_packet_family);
2411 }
2412 if (!error) {
2413 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
2414 &ovs_vport_mcgroup);
2415 }
2416
2417 ovsthread_once_done(&once);
2418 }
2419
2420 return error;
2421 }
2422
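/* Returns true if 'name' names a kernel vport of type OVS_VPORT_TYPE_INTERNAL,
 * false otherwise (including when no such vport exists). */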
2423 bool
2424 dpif_netlink_is_internal_device(const char *name)
2425 {
2426 struct dpif_netlink_vport reply;
2427 struct ofpbuf *buf;
2428 int error;
2429
2430 error = dpif_netlink_vport_get(name, &reply, &buf);
2431 if (!error) {
2432 ofpbuf_delete(buf);
2433 } else if (error != ENODEV && error != ENOENT) {
2434 VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
2435 name, ovs_strerror(error));
2436 }
2437
2438 return reply.type == OVS_VPORT_TYPE_INTERNAL;
2439 }
2440 \f
2441 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2442 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
2443 * positive errno value.
2444 *
2445 * 'vport' will contain pointers into 'buf', so the caller should not free
2446 * 'buf' while 'vport' is still in use. */
2447 static int
2448 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
2449 const struct ofpbuf *buf)
2450 {
2451 static const struct nl_policy ovs_vport_policy[] = {
2452 [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
2453 [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
2454 [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2455 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
2456 [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
2457 .optional = true },
2458 [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
2459 };
2460
2461 struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
2462 struct ovs_header *ovs_header;
2463 struct nlmsghdr *nlmsg;
2464 struct genlmsghdr *genl;
2465 struct ofpbuf b;
2466
2467 dpif_netlink_vport_init(vport);
2468
2469 ofpbuf_use_const(&b, buf->data, buf->size);
2470 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2471 genl = ofpbuf_try_pull(&b, sizeof *genl);
2472 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2473 if (!nlmsg || !genl || !ovs_header
2474 || nlmsg->nlmsg_type != ovs_vport_family
2475 || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
2476 ARRAY_SIZE(ovs_vport_policy))) {
2477 return EINVAL;
2478 }
2479
2480 vport->cmd = genl->cmd;
2481 vport->dp_ifindex = ovs_header->dp_ifindex;
2482 vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
2483 vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2484 vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
2485 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2486 vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
2487 / (sizeof *vport->upcall_pids);
2488 vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);
2489
2490 }
2491 if (a[OVS_VPORT_ATTR_STATS]) {
2492 vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
2493 }
2494 if (a[OVS_VPORT_ATTR_OPTIONS]) {
2495 vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
2496 vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
2497 }
2498 return 0;
2499 }
2500
2501 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2502 * followed by Netlink attributes corresponding to 'vport'. */
2503 static void
2504 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
2505 struct ofpbuf *buf)
2506 {
2507 struct ovs_header *ovs_header;
2508
2509 nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
2510 vport->cmd, OVS_VPORT_VERSION);
2511
2512 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2513 ovs_header->dp_ifindex = vport->dp_ifindex;
2514
2515 if (vport->port_no != ODPP_NONE) {
2516 nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
2517 }
2518
2519 if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
2520 nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
2521 }
2522
2523 if (vport->name) {
2524 nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
2525 }
2526
2527 if (vport->upcall_pids) {
2528 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
2529 vport->upcall_pids,
2530 vport->n_upcall_pids * sizeof *vport->upcall_pids);
2531 }
2532
2533 if (vport->stats) {
2534 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
2535 vport->stats, sizeof *vport->stats);
2536 }
2537
2538 if (vport->options) {
2539 nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
2540 vport->options, vport->options_len);
2541 }
2542 }
2543
2544 /* Clears 'vport' to "empty" values. */
2545 void
2546 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
2547 {
2548 memset(vport, 0, sizeof *vport);
2549 vport->port_no = ODPP_NONE;
2550 }
2551
2552 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2553 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2554 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2555 * result of the command is expected to be an ovs_vport also, which is decoded
2556 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2557 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2558 int
2559 dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
2560 struct dpif_netlink_vport *reply,
2561 struct ofpbuf **bufp)
2562 {
2563 struct ofpbuf *request_buf;
2564 int error;
2565
2566 ovs_assert((reply != NULL) == (bufp != NULL));
2567
2568 error = dpif_netlink_init();
2569 if (error) {
2570 if (reply) {
2571 *bufp = NULL;
2572 dpif_netlink_vport_init(reply);
2573 }
2574 return error;
2575 }
2576
2577 request_buf = ofpbuf_new(1024);
2578 dpif_netlink_vport_to_ofpbuf(request, request_buf);
2579 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2580 ofpbuf_delete(request_buf);
2581
2582 if (reply) {
2583 if (!error) {
2584 error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
2585 }
2586 if (error) {
2587 dpif_netlink_vport_init(reply);
2588 ofpbuf_delete(*bufp);
2589 *bufp = NULL;
2590 }
2591 }
2592 return error;
2593 }
2594
2595 /* Obtains information about the kernel vport named 'name' and stores it into
2596 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
2597 * longer needed ('reply' will contain pointers into '*bufp'). */
2598 int
2599 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
2600 struct ofpbuf **bufp)
2601 {
2602 struct dpif_netlink_vport request;
2603
2604 dpif_netlink_vport_init(&request);
2605 request.cmd = OVS_VPORT_CMD_GET;
2606 request.name = name;
2607
2608 return dpif_netlink_vport_transact(&request, reply, bufp);
2609 }
2610
2611 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2612 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
2613 * positive errno value.
2614 *
2615 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
2616 * while 'dp' is still in use. */
2617 static int
2618 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
2619 {
2620 static const struct nl_policy ovs_datapath_policy[] = {
2621 [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2622 [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
2623 .optional = true },
2624 [OVS_DP_ATTR_MEGAFLOW_STATS] = {
2625 NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
2626 .optional = true },
2627 };
2628
2629 struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
2630 struct ovs_header *ovs_header;
2631 struct nlmsghdr *nlmsg;
2632 struct genlmsghdr *genl;
2633 struct ofpbuf b;
2634
2635 dpif_netlink_dp_init(dp);
2636
2637 ofpbuf_use_const(&b, buf->data, buf->size);
2638 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2639 genl = ofpbuf_try_pull(&b, sizeof *genl);
2640 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2641 if (!nlmsg || !genl || !ovs_header
2642 || nlmsg->nlmsg_type != ovs_datapath_family
2643 || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
2644 ARRAY_SIZE(ovs_datapath_policy))) {
2645 return EINVAL;
2646 }
2647
2648 dp->cmd = genl->cmd;
2649 dp->dp_ifindex = ovs_header->dp_ifindex;
2650 dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
2651 if (a[OVS_DP_ATTR_STATS]) {
2652 dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
2653 }
2654
2655 if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
2656 dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
2657 }
2658
2659 return 0;
2660 }
2661
2662 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
2663 static void
2664 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
2665 {
2666 struct ovs_header *ovs_header;
2667
2668 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
2669 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
2670 OVS_DATAPATH_VERSION);
2671
2672 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2673 ovs_header->dp_ifindex = dp->dp_ifindex;
2674
2675 if (dp->name) {
2676 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
2677 }
2678
2679 if (dp->upcall_pid) {
2680 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
2681 }
2682
2683 if (dp->user_features) {
2684 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
2685 }
2686
2687 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
2688 }
2689
2690 /* Clears 'dp' to "empty" values. */
2691 static void
2692 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
2693 {
2694 memset(dp, 0, sizeof *dp);
2695 }
2696
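/* Starts a Netlink dump of every datapath in the kernel, i.e. an
 * OVS_DP_CMD_GET request with a zero dp_ifindex.  Each reply can be decoded
 * with dpif_netlink_dp_from_ofpbuf(). */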
2697 static void
2698 dpif_netlink_dp_dump_start(struct nl_dump *dump)
2699 {
2700 struct dpif_netlink_dp request;
2701 struct ofpbuf *buf;
2702
2703 dpif_netlink_dp_init(&request);
2704 request.cmd = OVS_DP_CMD_GET;
2705
2706 buf = ofpbuf_new(1024);
2707 dpif_netlink_dp_to_ofpbuf(&request, buf);
2708 nl_dump_start(dump, NETLINK_GENERIC, buf);
2709 ofpbuf_delete(buf);
2710 }
2711
2712 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2713 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2714 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2715 * result of the command is expected to be of the same form, which is decoded
2716 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2717 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2718 static int
2719 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
2720 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
2721 {
2722 struct ofpbuf *request_buf;
2723 int error;
2724
2725 ovs_assert((reply != NULL) == (bufp != NULL));
2726
2727 request_buf = ofpbuf_new(1024);
2728 dpif_netlink_dp_to_ofpbuf(request, request_buf);
2729 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2730 ofpbuf_delete(request_buf);
2731
2732 if (reply) {
2733 dpif_netlink_dp_init(reply);
2734 if (!error) {
2735 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
2736 }
2737 if (error) {
2738 ofpbuf_delete(*bufp);
2739 *bufp = NULL;
2740 }
2741 }
2742 return error;
2743 }
2744
2745 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
2746 * The caller must free '*bufp' when the reply is no longer needed ('reply'
2747 * will contain pointers into '*bufp'). */
2748 static int
2749 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
2750 struct ofpbuf **bufp)
2751 {
2752 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2753 struct dpif_netlink_dp request;
2754
2755 dpif_netlink_dp_init(&request);
2756 request.cmd = OVS_DP_CMD_GET;
2757 request.dp_ifindex = dpif->dp_ifindex;
2758
2759 return dpif_netlink_dp_transact(&request, reply, bufp);
2760 }
2761
2762 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2763 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
2764 * positive errno value.
2765 *
2766 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
2767 * while 'flow' is still in use. */
2768 static int
2769 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
2770 const struct ofpbuf *buf)
2771 {
2772 static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
2773 [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
2774 [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
2775 [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
2776 [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
2777 .optional = true },
2778 [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
2779 [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
2780 [OVS_FLOW_ATTR_UFID] = { .type = NL_A_UNSPEC, .optional = true,
2781 .min_len = sizeof(ovs_u128) },
2782 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
2783 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
2784 /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
2785 };
2786
2787 struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
2788 struct ovs_header *ovs_header;
2789 struct nlmsghdr *nlmsg;
2790 struct genlmsghdr *genl;
2791 struct ofpbuf b;
2792
2793 dpif_netlink_flow_init(flow);
2794
2795 ofpbuf_use_const(&b, buf->data, buf->size);
2796 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2797 genl = ofpbuf_try_pull(&b, sizeof *genl);
2798 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2799 if (!nlmsg || !genl || !ovs_header
2800 || nlmsg->nlmsg_type != ovs_flow_family
2801 || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
2802 ARRAY_SIZE(ovs_flow_policy))) {
2803 return EINVAL;
2804 }
2805 if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
2806 return EINVAL;
2807 }
2808
2809 flow->nlmsg_flags = nlmsg->nlmsg_flags;
2810 flow->dp_ifindex = ovs_header->dp_ifindex;
2811 if (a[OVS_FLOW_ATTR_KEY]) {
2812 flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
2813 flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
2814 }
2815
2816 if (a[OVS_FLOW_ATTR_UFID]) {
2817 const ovs_u128 *ufid;
2818
2819 ufid = nl_attr_get_unspec(a[OVS_FLOW_ATTR_UFID],
2820 nl_attr_get_size(a[OVS_FLOW_ATTR_UFID]));
2821 flow->ufid = *ufid;
2822 flow->ufid_present = true;
2823 }
2824 if (a[OVS_FLOW_ATTR_MASK]) {
2825 flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
2826 flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
2827 }
2828 if (a[OVS_FLOW_ATTR_ACTIONS]) {
2829 flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
2830 flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
2831 }
2832 if (a[OVS_FLOW_ATTR_STATS]) {
2833 flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
2834 }
2835 if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
2836 flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
2837 }
2838 if (a[OVS_FLOW_ATTR_USED]) {
2839 flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
2840 }
2841 return 0;
2842 }
2843
2844 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2845 * followed by Netlink attributes corresponding to 'flow'. */
2846 static void
2847 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
2848 struct ofpbuf *buf)
2849 {
2850 struct ovs_header *ovs_header;
2851
2852 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
2853 NLM_F_REQUEST | flow->nlmsg_flags,
2854 flow->cmd, OVS_FLOW_VERSION);
2855
2856 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2857 ovs_header->dp_ifindex = flow->dp_ifindex;
2858
2859 if (flow->ufid_present) {
2860 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_UFID, &flow->ufid,
2861 sizeof flow->ufid);
2862 }
2863 if (flow->ufid_terse) {
2864 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
2865 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
2866 | OVS_UFID_F_OMIT_ACTIONS);
2867 }
2868 if (!flow->ufid_terse || !flow->ufid_present) {
2869 if (flow->key_len) {
2870 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY,
2871 flow->key, flow->key_len);
2872 }
2873
2874 if (flow->mask_len) {
2875 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK,
2876 flow->mask, flow->mask_len);
2877 }
2878 if (flow->actions || flow->actions_len) {
2879 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
2880 flow->actions, flow->actions_len);
2881 }
2882 }
2883
2884 /* We never need to send these to the kernel. */
2885 ovs_assert(!flow->stats);
2886 ovs_assert(!flow->tcp_flags);
2887 ovs_assert(!flow->used);
2888
2889 if (flow->clear) {
2890 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
2891 }
2892 if (flow->probe) {
2893 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
2894 }
2895 }
2896
2897 /* Clears 'flow' to "empty" values. */
2898 static void
2899 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
2900 {
2901 memset(flow, 0, sizeof *flow);
2902 }
2903
2904 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2905 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2906 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2907 * result of the command is expected to be a flow also, which is decoded and
2908 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
2909 * is no longer needed ('reply' will contain pointers into '*bufp'). */
2910 static int
2911 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
2912 struct dpif_netlink_flow *reply,
2913 struct ofpbuf **bufp)
2914 {
2915 struct ofpbuf *request_buf;
2916 int error;
2917
2918 ovs_assert((reply != NULL) == (bufp != NULL));
2919
2920 if (reply) {
2921 request->nlmsg_flags |= NLM_F_ECHO;
2922 }
2923
2924 request_buf = ofpbuf_new(1024);
2925 dpif_netlink_flow_to_ofpbuf(request, request_buf);
2926 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2927 ofpbuf_delete(request_buf);
2928
2929 if (reply) {
2930 if (!error) {
2931 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
2932 }
2933 if (error) {
2934 dpif_netlink_flow_init(reply);
2935 ofpbuf_delete(*bufp);
2936 *bufp = NULL;
2937 }
2938 }
2939 return error;
2940 }
2941
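/* Converts the kernel statistics attached to 'flow' into '*stats'.  The
 * kernel's 64-bit counters may only be 32-bit aligned, hence the
 * get_32aligned_u64() accessors; attributes that are absent read as zero. */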
2942 static void
2943 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
2944 struct dpif_flow_stats *stats)
2945 {
2946 if (flow->stats) {
2947 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
2948 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
2949 } else {
2950 stats->n_packets = 0;
2951 stats->n_bytes = 0;
2952 }
2953 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
2954 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
2955 }
2956 \f
2957 /* Logs information about a packet that was recently lost in 'ch' (in
2958 * 'dpif_'). */
2959 static void
2960 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
2961 uint32_t handler_id)
2962 {
2963 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
2964 struct ds s;
2965
2966 if (VLOG_DROP_WARN(&rl)) {
2967 return;
2968 }
2969
2970 ds_init(&s);
2971 if (ch->last_poll != LLONG_MIN) {
2972 ds_put_format(&s, " (last polled %lld ms ago)",
2973 time_msec() - ch->last_poll);
2974 }
2975
2976 VLOG_WARN("%s: lost packet on port channel %u of handler %u%s",
2977 dpif_name(&dpif->dpif), ch_idx, handler_id, ds_cstr(&s));
2978 ds_destroy(&s);
2979 }