lib/netlink-socket.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netlink-socket.h"
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <stdlib.h>
22 #include <sys/types.h>
23 #include <sys/uio.h>
24 #include <unistd.h>
25 #include "coverage.h"
26 #include "dynamic-string.h"
27 #include "hash.h"
28 #include "hmap.h"
29 #include "netlink.h"
30 #include "netlink-protocol.h"
31 #include "odp-netlink.h"
32 #include "ofpbuf.h"
33 #include "ovs-thread.h"
34 #include "poll-loop.h"
35 #include "seq.h"
36 #include "socket-util.h"
37 #include "util.h"
38 #include "vlog.h"
39
40 VLOG_DEFINE_THIS_MODULE(netlink_socket);
41
42 COVERAGE_DEFINE(netlink_overflow);
43 COVERAGE_DEFINE(netlink_received);
44 COVERAGE_DEFINE(netlink_recv_jumbo);
45 COVERAGE_DEFINE(netlink_sent);
46
47 /* Linux header file confusion causes this to be undefined. */
48 #ifndef SOL_NETLINK
49 #define SOL_NETLINK 270
50 #endif
51
52 #ifdef _WIN32
53 static struct ovs_mutex portid_mutex = OVS_MUTEX_INITIALIZER;
54 static uint32_t g_last_portid = 0;
55
56 /* Port IDs must be unique! */
57 static uint32_t
58 portid_next(void)
59 OVS_GUARDED_BY(portid_mutex)
60 {
61 g_last_portid++;
62 return g_last_portid;
63 }
64 #endif /* _WIN32 */
65
66 /* A single (bad) Netlink message can in theory dump out many, many log
67 * messages, so the burst size is set quite high here to avoid missing useful
68 * information. Also, at high logging levels we log *all* Netlink messages. */
69 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);
70
71 static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
72 static void log_nlmsg(const char *function, int error,
73 const void *message, size_t size, int protocol);
74 #ifdef _WIN32
75 static int get_sock_pid_from_kernel(struct nl_sock *sock);
76 #endif
77 \f
78 /* Netlink sockets. */
79
80 struct nl_sock {
81 #ifdef _WIN32
82 HANDLE handle;
83 OVERLAPPED overlapped;
84 DWORD read_ioctl;
85 #else
86 int fd;
87 #endif
88 uint32_t next_seq;
89 uint32_t pid;
90 int protocol;
91 unsigned int rcvbuf; /* Receive buffer size (SO_RCVBUF). */
92 };
93
94 /* Compile-time limit on iovecs, so that we can allocate a maximum-size array
95 * of iovecs on the stack. */
96 #define MAX_IOVS 128
97
98 /* Maximum number of iovecs that may be passed to sendmsg, clamped to at
99  * least _XOPEN_IOV_MAX (16) and at most MAX_IOVS.
100 *
101 * Initialized by nl_sock_create(). */
102 static int max_iovs;
103
104 static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
105 static void nl_pool_release(struct nl_sock *);
106
107 /* Creates a new netlink socket for the given netlink 'protocol'
108 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
109 * new socket if successful, otherwise returns a positive errno value. */
110 int
111 nl_sock_create(int protocol, struct nl_sock **sockp)
112 {
113 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
114 struct nl_sock *sock;
115 #ifndef _WIN32
116 struct sockaddr_nl local, remote;
117 #endif
118 socklen_t local_size;
119 int rcvbuf;
120 int retval = 0;
121
122 if (ovsthread_once_start(&once)) {
123 int save_errno = errno;
124 errno = 0;
125
126 max_iovs = sysconf(_SC_UIO_MAXIOV);
127 if (max_iovs < _XOPEN_IOV_MAX) {
128 if (max_iovs == -1 && errno) {
129 VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
130 }
131 max_iovs = _XOPEN_IOV_MAX;
132 } else if (max_iovs > MAX_IOVS) {
133 max_iovs = MAX_IOVS;
134 }
135
136 errno = save_errno;
137 ovsthread_once_done(&once);
138 }
139
140 *sockp = NULL;
141 sock = xmalloc(sizeof *sock);
142
143 #ifdef _WIN32
144 sock->handle = CreateFile(OVS_DEVICE_NAME_USER,
145 GENERIC_READ | GENERIC_WRITE,
146 FILE_SHARE_READ | FILE_SHARE_WRITE,
147 NULL, OPEN_EXISTING,
148 FILE_FLAG_OVERLAPPED, NULL);
149
150 if (sock->handle == INVALID_HANDLE_VALUE) {
151         VLOG_ERR("CreateFile: %s", ovs_lasterror_to_string());
152 goto error;
153 }
154
155 memset(&sock->overlapped, 0, sizeof sock->overlapped);
156 sock->overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
157 if (sock->overlapped.hEvent == NULL) {
158         VLOG_ERR("CreateEvent: %s", ovs_lasterror_to_string());
159 goto error;
160 }
161     /* Initialize the read ioctl to the generic variant. */
162 sock->read_ioctl = OVS_IOCTL_READ;
163 #else
164 sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
165 if (sock->fd < 0) {
166         VLOG_ERR("socket: %s", ovs_strerror(errno));
167 goto error;
168 }
169 #endif
170
171 sock->protocol = protocol;
172 sock->next_seq = 1;
173
174 rcvbuf = 1024 * 1024;
175 #ifdef _WIN32
176 sock->rcvbuf = rcvbuf;
177 retval = get_sock_pid_from_kernel(sock);
178 if (retval != 0) {
179 goto error;
180 }
181 #else
182 if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
183 &rcvbuf, sizeof rcvbuf)) {
184 /* Only root can use SO_RCVBUFFORCE. Everyone else gets EPERM.
185 * Warn only if the failure is therefore unexpected. */
186 if (errno != EPERM) {
187 VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
188 "(%s)", rcvbuf, ovs_strerror(errno));
189 }
190 }
191
192 retval = get_socket_rcvbuf(sock->fd);
193 if (retval < 0) {
194 retval = -retval;
195 goto error;
196 }
197 sock->rcvbuf = retval;
198
199 /* Connect to kernel (pid 0) as remote address. */
200 memset(&remote, 0, sizeof remote);
201 remote.nl_family = AF_NETLINK;
202 remote.nl_pid = 0;
203 if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
204 VLOG_ERR("connect(0): %s", ovs_strerror(errno));
205 goto error;
206 }
207
208 /* Obtain pid assigned by kernel. */
209 local_size = sizeof local;
210 if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
211 VLOG_ERR("getsockname: %s", ovs_strerror(errno));
212 goto error;
213 }
214 if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
215 VLOG_ERR("getsockname returned bad Netlink name");
216 retval = EINVAL;
217 goto error;
218 }
219 sock->pid = local.nl_pid;
220 #endif
221
222 *sockp = sock;
223 return 0;
224
225 error:
226 if (retval == 0) {
227 retval = errno;
228 if (retval == 0) {
229 retval = EINVAL;
230 }
231 }
232 #ifdef _WIN32
233 if (sock->overlapped.hEvent) {
234 CloseHandle(sock->overlapped.hEvent);
235 }
236 if (sock->handle != INVALID_HANDLE_VALUE) {
237 CloseHandle(sock->handle);
238 }
239 #else
240 if (sock->fd >= 0) {
241 close(sock->fd);
242 }
243 #endif
244 free(sock);
245 return retval;
246 }
247
248 /* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
249 * sets '*sockp' to the new socket if successful, otherwise returns a positive
250 * errno value. */
251 int
252 nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
253 {
254 return nl_sock_create(src->protocol, sockp);
255 }
256
257 /* Destroys netlink socket 'sock'. */
258 void
259 nl_sock_destroy(struct nl_sock *sock)
260 {
261 if (sock) {
262 #ifdef _WIN32
263 if (sock->overlapped.hEvent) {
264 CloseHandle(sock->overlapped.hEvent);
265 }
266 CloseHandle(sock->handle);
267 #else
268 close(sock->fd);
269 #endif
270 free(sock);
271 }
272 }
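
/* Usage sketch (illustrative only, not part of the build): a typical caller
 * creates a socket for one protocol, uses it, and destroys it when done,
 * following the positive-errno convention used throughout this file:
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (!error) {
 *         ...use nl_sock_send(), nl_sock_recv(), etc. on 'sock'...
 *         nl_sock_destroy(sock);
 *     }
 */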
273
274 #ifdef _WIN32
275 /* Reads the pid that the kernel datapath assigned to 'sock'. The function
276  * uses transaction semantics. Eventually this function should call into
277  * nl_transact(). */
278 static int
279 get_sock_pid_from_kernel(struct nl_sock *sock)
280 {
281 struct nl_transaction txn;
282 struct ofpbuf request;
283 uint64_t request_stub[128];
284 struct ofpbuf reply;
285 uint64_t reply_stub[128];
286 struct ovs_header *ovs_header;
287 struct nlmsghdr *nlmsg;
288 uint32_t seq;
289 int retval;
290 DWORD bytes;
291 int ovs_msg_size = sizeof (struct nlmsghdr) + sizeof (struct genlmsghdr) +
292 sizeof (struct ovs_header);
293
294 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
295 txn.request = &request;
296 ofpbuf_use_stub(&reply, reply_stub, sizeof reply_stub);
297 txn.reply = &reply;
298
299 seq = nl_sock_allocate_seq(sock, 1);
300 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
301 OVS_CTRL_CMD_WIN_GET_PID, OVS_WIN_CONTROL_VERSION);
302 nlmsg = nl_msg_nlmsghdr(txn.request);
303 nlmsg->nlmsg_seq = seq;
304
305 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
306 ovs_header->dp_ifindex = 0;
307 ovs_header = ofpbuf_put_uninit(&reply, ovs_msg_size);
308
309 if (!DeviceIoControl(sock->handle, OVS_IOCTL_TRANSACT,
310 ofpbuf_data(txn.request), ofpbuf_size(txn.request),
311 ofpbuf_data(txn.reply), ofpbuf_size(txn.reply),
312 &bytes, NULL)) {
313 retval = EINVAL;
314 goto done;
315 } else {
316 if (bytes < ovs_msg_size) {
317 retval = EINVAL;
318 goto done;
319 }
320
321 nlmsg = nl_msg_nlmsghdr(txn.reply);
322 if (nlmsg->nlmsg_seq != seq) {
323 retval = EINVAL;
324 goto done;
325 }
326 sock->pid = nlmsg->nlmsg_pid;
327 }
328 retval = 0;
329
330 done:
331 ofpbuf_uninit(&request);
332 ofpbuf_uninit(&reply);
333 return retval;
334 }
335 #endif /* _WIN32 */
336
337 #ifdef _WIN32
338 static int __inline
339 nl_sock_mcgroup(struct nl_sock *sock, unsigned int multicast_group, bool join)
340 {
341 struct ofpbuf request;
342 uint64_t request_stub[128];
343 struct ovs_header *ovs_header;
344 struct nlmsghdr *nlmsg;
345 int error;
346
347 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
348
349 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
350 OVS_CTRL_CMD_MC_SUBSCRIBE_REQ,
351 OVS_WIN_CONTROL_VERSION);
352
353 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
354 ovs_header->dp_ifindex = 0;
355
356 nl_msg_put_u32(&request, OVS_NL_ATTR_MCAST_GRP, multicast_group);
357 nl_msg_put_u8(&request, OVS_NL_ATTR_MCAST_JOIN, join ? 1 : 0);
358
359 error = nl_sock_send(sock, &request, true);
360 ofpbuf_uninit(&request);
361 return error;
362 }
363 #endif
364 /* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
365 * successful, otherwise a positive errno value.
366 *
367 * A socket that is subscribed to a multicast group that receives asynchronous
368 * notifications must not be used for Netlink transactions or dumps, because
369 * transactions and dumps can cause notifications to be lost.
370 *
371 * Multicast group numbers are always positive.
372 *
373 * It is not an error to attempt to join a multicast group to which a socket
374 * already belongs. */
375 int
376 nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
377 {
378 #ifdef _WIN32
379 /* Set the socket type as a "multicast" socket */
380 sock->read_ioctl = OVS_IOCTL_READ_EVENT;
381 int error = nl_sock_mcgroup(sock, multicast_group, true);
382 if (error) {
383 sock->read_ioctl = OVS_IOCTL_READ;
384 VLOG_WARN("could not join multicast group %u (%s)",
385 multicast_group, ovs_strerror(error));
386 return error;
387 }
388 #else
389 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
390 &multicast_group, sizeof multicast_group) < 0) {
391 VLOG_WARN("could not join multicast group %u (%s)",
392 multicast_group, ovs_strerror(errno));
393 return errno;
394 }
395 #endif
396 return 0;
397 }
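
/* Subscription sketch (illustrative only): a socket dedicated to asynchronous
 * notifications. 'group' is assumed to have been obtained, e.g. with
 * nl_lookup_genl_mcgroup() later in this file, and per the comment above the
 * socket must not also be used for transactions or dumps. The 2048-byte
 * allocation is an arbitrary "typical" size:
 *
 *     if (!nl_sock_join_mcgroup(sock, group)) {
 *         struct ofpbuf buf;
 *
 *         ofpbuf_init(&buf, 2048);
 *         while (!nl_sock_recv(sock, &buf, true)) {
 *             ...process the notification in 'buf'...
 *         }
 *         ofpbuf_uninit(&buf);
 *     }
 */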
398
399 #ifdef _WIN32
400 int
401 nl_sock_subscribe_packets(struct nl_sock *sock)
402 {
403 int error;
404
405 if (sock->read_ioctl != OVS_IOCTL_READ) {
406 return EINVAL;
407 }
408
409 error = nl_sock_subscribe_packet__(sock, true);
410 if (error) {
411         VLOG_WARN("could not subscribe to packets (%s)",
412                   ovs_strerror(error));
413 return error;
414 }
415 sock->read_ioctl = OVS_IOCTL_READ_PACKET;
416
417 return 0;
418 }
419
420 int
421 nl_sock_unsubscribe_packets(struct nl_sock *sock)
422 {
423 ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET);
424
425 int error = nl_sock_subscribe_packet__(sock, false);
426 if (error) {
427         VLOG_WARN("could not unsubscribe from packets (%s)",
428                   ovs_strerror(error));
429 return error;
430 }
431
432 sock->read_ioctl = OVS_IOCTL_READ;
433 return 0;
434 }
435
436 int
437 nl_sock_subscribe_packet__(struct nl_sock *sock, bool subscribe)
438 {
439 struct ofpbuf request;
440 uint64_t request_stub[128];
441 struct ovs_header *ovs_header;
442 struct nlmsghdr *nlmsg;
443 int error;
444
445 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
446 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
447 OVS_CTRL_CMD_PACKET_SUBSCRIBE_REQ,
448 OVS_WIN_CONTROL_VERSION);
449
450 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
451 ovs_header->dp_ifindex = 0;
452 nl_msg_put_u8(&request, OVS_NL_ATTR_PACKET_SUBSCRIBE, subscribe ? 1 : 0);
453 nl_msg_put_u32(&request, OVS_NL_ATTR_PACKET_PID, sock->pid);
454
455 error = nl_sock_send(sock, &request, true);
456 ofpbuf_uninit(&request);
457 return error;
458 }
459 #endif
460
461 /* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
462 * successful, otherwise a positive errno value.
463 *
464 * Multicast group numbers are always positive.
465 *
466 * It is not an error to attempt to leave a multicast group to which a socket
467 * does not belong.
468 *
469 * On success, reading from 'sock' will still return any messages that were
470 * received on 'multicast_group' before the group was left. */
471 int
472 nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
473 {
474 #ifdef _WIN32
475 int error = nl_sock_mcgroup(sock, multicast_group, false);
476 if (error) {
477 VLOG_WARN("could not leave multicast group %u (%s)",
478 multicast_group, ovs_strerror(error));
479 return error;
480 }
481 sock->read_ioctl = OVS_IOCTL_READ;
482 #else
483 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
484 &multicast_group, sizeof multicast_group) < 0) {
485 VLOG_WARN("could not leave multicast group %u (%s)",
486 multicast_group, ovs_strerror(errno));
487 return errno;
488 }
489 #endif
490 return 0;
491 }
492
493 static int
494 nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
495 uint32_t nlmsg_seq, bool wait)
496 {
497 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
498 int error;
499
500 nlmsg->nlmsg_len = ofpbuf_size(msg);
501 nlmsg->nlmsg_seq = nlmsg_seq;
502 nlmsg->nlmsg_pid = sock->pid;
503 do {
504 int retval;
505 #ifdef _WIN32
506 DWORD bytes;
507
508 if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
509 ofpbuf_data(msg), ofpbuf_size(msg), NULL, 0,
510 &bytes, NULL)) {
511 retval = -1;
512 /* XXX: Map to a more appropriate error based on GetLastError(). */
513 errno = EINVAL;
514 } else {
515 retval = ofpbuf_size(msg);
516 }
517 #else
518 retval = send(sock->fd, ofpbuf_data(msg), ofpbuf_size(msg),
519 wait ? 0 : MSG_DONTWAIT);
520 #endif
521 error = retval < 0 ? errno : 0;
522 } while (error == EINTR);
523 log_nlmsg(__func__, error, ofpbuf_data(msg), ofpbuf_size(msg), sock->protocol);
524 if (!error) {
525 COVERAGE_INC(netlink_sent);
526 }
527 return error;
528 }
529
530 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
531 * 'sock'. nlmsg_len in 'msg' will be finalized to match ofpbuf_size(msg), nlmsg_pid
532 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
533 * sequence number, before the message is sent.
534 *
535 * Returns 0 if successful, otherwise a positive errno value. If
536 * 'wait' is true, then the send will wait until buffer space is ready;
537 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
538 int
539 nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
540 {
541 return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
542 }
543
544 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
545 * 'sock'. nlmsg_len in 'msg' will be finalized to match ofpbuf_size(msg), nlmsg_pid
546 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
547 * 'nlmsg_seq', before the message is sent.
548 *
549 * Returns 0 if successful, otherwise a positive errno value. If
550 * 'wait' is true, then the send will wait until buffer space is ready;
551 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
552 *
553 * This function is suitable for sending a reply to a request that was received
554 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
555 int
556 nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
557 uint32_t nlmsg_seq, bool wait)
558 {
559 return nl_sock_send__(sock, msg, nlmsg_seq, wait);
560 }
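
/* Send sketch (illustrative only): building a Generic Netlink request and
 * sending it. The control-family query mirrors the pattern used by
 * do_lookup_genl_family() later in this file; the family name string is only
 * an example, and any other family, command, and attributes could be used:
 *
 *     struct ofpbuf request;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "ovs_datapath");
 *     error = nl_sock_send(sock, &request, true);
 *     ofpbuf_uninit(&request);
 */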
561
562 static int
563 nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
564 {
565 /* We can't accurately predict the size of the data to be received. The
566 * caller is supposed to have allocated enough space in 'buf' to handle the
567 * "typical" case. To handle exceptions, we make available enough space in
568 * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
569 * figure since that's the maximum length of a Netlink attribute). */
570 struct nlmsghdr *nlmsghdr;
571 uint8_t tail[65536];
572 struct iovec iov[2];
573 struct msghdr msg;
574 ssize_t retval;
575 int error;
576
577 ovs_assert(buf->allocated >= sizeof *nlmsghdr);
578 ofpbuf_clear(buf);
579
580 iov[0].iov_base = ofpbuf_base(buf);
581 iov[0].iov_len = buf->allocated;
582 iov[1].iov_base = tail;
583 iov[1].iov_len = sizeof tail;
584
585 memset(&msg, 0, sizeof msg);
586 msg.msg_iov = iov;
587 msg.msg_iovlen = 2;
588
589 /* Receive a Netlink message from the kernel.
590 *
591 * This works around a kernel bug in which the kernel returns an error code
592 * as if it were the number of bytes read. It doesn't actually modify
593 * anything in the receive buffer in that case, so we can initialize the
594 * Netlink header with an impossible message length and then, upon success,
595 * check whether it changed. */
596 nlmsghdr = ofpbuf_base(buf);
597 do {
598 nlmsghdr->nlmsg_len = UINT32_MAX;
599 #ifdef _WIN32
600 DWORD bytes;
601 if (!DeviceIoControl(sock->handle, sock->read_ioctl,
602 NULL, 0, tail, sizeof tail, &bytes, NULL)) {
603 retval = -1;
604 errno = EINVAL;
605 } else {
606 retval = bytes;
607 if (retval == 0) {
608 retval = -1;
609 errno = EAGAIN;
610 } else {
611 if (retval >= buf->allocated) {
612 ofpbuf_reinit(buf, retval);
613 nlmsghdr = ofpbuf_base(buf);
614 nlmsghdr->nlmsg_len = UINT32_MAX;
615 }
616 memcpy(ofpbuf_data(buf), tail, retval);
617 ofpbuf_set_size(buf, retval);
618 }
619 }
620 #else
621 retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
622 #endif
623 error = (retval < 0 ? errno
624 : retval == 0 ? ECONNRESET /* not possible? */
625 : nlmsghdr->nlmsg_len != UINT32_MAX ? 0
626 : retval);
627 } while (error == EINTR);
628 if (error) {
629 if (error == ENOBUFS) {
630 /* Socket receive buffer overflow dropped one or more messages that
631 * the kernel tried to send to us. */
632 COVERAGE_INC(netlink_overflow);
633 }
634 return error;
635 }
636
637 if (msg.msg_flags & MSG_TRUNC) {
638 VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
639 sizeof tail);
640 return E2BIG;
641 }
642
643 if (retval < sizeof *nlmsghdr
644 || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
645 || nlmsghdr->nlmsg_len > retval) {
646 VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIuSIZE" bytes < %"PRIuSIZE")",
647 retval, sizeof *nlmsghdr);
648 return EPROTO;
649 }
650 #ifndef _WIN32
651 ofpbuf_set_size(buf, MIN(retval, buf->allocated));
652 if (retval > buf->allocated) {
653 COVERAGE_INC(netlink_recv_jumbo);
654 ofpbuf_put(buf, tail, retval - buf->allocated);
655 }
656 #endif
657
658 log_nlmsg(__func__, 0, ofpbuf_data(buf), ofpbuf_size(buf), sock->protocol);
659 COVERAGE_INC(netlink_received);
660
661 return 0;
662 }
663
664 /* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
665 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
666 * EAGAIN if the 'sock' receive buffer is empty.
667 *
668 * The caller must have initialized 'buf' with an allocation of at least
669 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
670 * space for a "typical" message.
671 *
672 * On success, returns 0 and replaces 'buf''s previous content by the received
673 * message. This function expands 'buf''s allocated memory, as necessary, to
674 * hold the actual size of the received message.
675 *
676 * On failure, returns a positive errno value and clears 'buf' to zero length.
677 * 'buf' retains its previous memory allocation.
678 *
679 * Regardless of success or failure, this function resets 'buf''s headroom to
680 * 0. */
681 int
682 nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
683 {
684 return nl_sock_recv__(sock, buf, wait);
685 }
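
/* Receive sketch (illustrative only): a nonblocking receive. The 2048-byte
 * initial allocation is an arbitrary "typical" size as described above:
 *
 *     struct ofpbuf buf;
 *     int error;
 *
 *     ofpbuf_init(&buf, 2048);
 *     error = nl_sock_recv(sock, &buf, false);
 *     if (!error) {
 *         ...a complete Netlink message is now in 'buf'...
 *     } else if (error == EAGAIN) {
 *         ...nothing was ready; see nl_sock_wait() for how to wait...
 *     }
 *     ofpbuf_uninit(&buf);
 */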
686
687 static void
688 nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
689 int error)
690 {
691 size_t i;
692
693 for (i = 0; i < n; i++) {
694 struct nl_transaction *txn = transactions[i];
695
696 txn->error = error;
697 if (txn->reply) {
698 ofpbuf_clear(txn->reply);
699 }
700 }
701 }
702
703 static int
704 nl_sock_transact_multiple__(struct nl_sock *sock,
705 struct nl_transaction **transactions, size_t n,
706 size_t *done)
707 {
708 uint64_t tmp_reply_stub[1024 / 8];
709 struct nl_transaction tmp_txn;
710 struct ofpbuf tmp_reply;
711
712 uint32_t base_seq;
713 struct iovec iovs[MAX_IOVS];
714 struct msghdr msg;
715 int error;
716 int i;
717
718 base_seq = nl_sock_allocate_seq(sock, n);
719 *done = 0;
720 for (i = 0; i < n; i++) {
721 struct nl_transaction *txn = transactions[i];
722 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);
723
724 nlmsg->nlmsg_len = ofpbuf_size(txn->request);
725 nlmsg->nlmsg_seq = base_seq + i;
726 nlmsg->nlmsg_pid = sock->pid;
727
728 iovs[i].iov_base = ofpbuf_data(txn->request);
729 iovs[i].iov_len = ofpbuf_size(txn->request);
730 }
731
732 #ifndef _WIN32
733 memset(&msg, 0, sizeof msg);
734 msg.msg_iov = iovs;
735 msg.msg_iovlen = n;
736 do {
737 error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
738 } while (error == EINTR);
739
740 for (i = 0; i < n; i++) {
741 struct nl_transaction *txn = transactions[i];
742
743 log_nlmsg(__func__, error, ofpbuf_data(txn->request),
744 ofpbuf_size(txn->request), sock->protocol);
745 }
746 if (!error) {
747 COVERAGE_ADD(netlink_sent, n);
748 }
749
750 if (error) {
751 return error;
752 }
753
754 ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
755 tmp_txn.request = NULL;
756 tmp_txn.reply = &tmp_reply;
757 tmp_txn.error = 0;
758 while (n > 0) {
759 struct nl_transaction *buf_txn, *txn;
760 uint32_t seq;
761
762 /* Find a transaction whose buffer we can use for receiving a reply.
763 * If no such transaction is left, use tmp_txn. */
764 buf_txn = &tmp_txn;
765 for (i = 0; i < n; i++) {
766 if (transactions[i]->reply) {
767 buf_txn = transactions[i];
768 break;
769 }
770 }
771
772 /* Receive a reply. */
773 error = nl_sock_recv__(sock, buf_txn->reply, false);
774 if (error) {
775 if (error == EAGAIN) {
776 nl_sock_record_errors__(transactions, n, 0);
777 *done += n;
778 error = 0;
779 }
780 break;
781 }
782
783 /* Match the reply up with a transaction. */
784 seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
785 if (seq < base_seq || seq >= base_seq + n) {
786 VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
787 continue;
788 }
789 i = seq - base_seq;
790 txn = transactions[i];
791
792 /* Fill in the results for 'txn'. */
793 if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
794 if (txn->reply) {
795 ofpbuf_clear(txn->reply);
796 }
797 if (txn->error) {
798 VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
799                             txn->error, ovs_strerror(txn->error));
800 }
801 } else {
802 txn->error = 0;
803 if (txn->reply && txn != buf_txn) {
804 /* Swap buffers. */
805 struct ofpbuf *reply = buf_txn->reply;
806 buf_txn->reply = txn->reply;
807 txn->reply = reply;
808 }
809 }
810
811 /* Fill in the results for transactions before 'txn'. (We have to do
812 * this after the results for 'txn' itself because of the buffer swap
813 * above.) */
814 nl_sock_record_errors__(transactions, i, 0);
815
816 /* Advance. */
817 *done += i + 1;
818 transactions += i + 1;
819 n -= i + 1;
820 base_seq += i + 1;
821 }
822 ofpbuf_uninit(&tmp_reply);
823 #else
824 error = 0;
825 uint8_t reply_buf[65536];
826 for (i = 0; i < n; i++) {
827 DWORD reply_len;
828 struct nl_transaction *txn = transactions[i];
829 struct nlmsghdr *request_nlmsg, *reply_nlmsg;
830
831 if (!DeviceIoControl(sock->handle, OVS_IOCTL_TRANSACT,
832 ofpbuf_data(txn->request),
833 ofpbuf_size(txn->request),
834 reply_buf, sizeof reply_buf,
835 &reply_len, NULL)) {
836 /* XXX: Map to a more appropriate error. */
837 error = EINVAL;
838 break;
839 }
840
841         /* Validate the length and sequence number of the reply. */
842         request_nlmsg = nl_msg_nlmsghdr(txn->request);
843         reply_nlmsg = (struct nlmsghdr *)reply_buf;
844 
845         if (reply_len < sizeof *reply_nlmsg) {
846             nl_sock_record_errors__(transactions, n, 0);
847             VLOG_DBG_RL(&rl, "insufficient length of reply %"PRIu32
848                 " for seq: %#"PRIx32, reply_len, request_nlmsg->nlmsg_seq);
849             break;
850         }
851
852 if (request_nlmsg->nlmsg_seq != reply_nlmsg->nlmsg_seq) {
853             VLOG_DBG_RL(&rl, "mismatched seq request %#"PRIx32
854                 ", reply %#"PRIx32,
855                 request_nlmsg->nlmsg_seq,
856                 reply_nlmsg->nlmsg_seq);
857 break;
858 }
859
860 /* Handle errors embedded within the netlink message. */
861 ofpbuf_use_stub(&tmp_reply, reply_buf, sizeof reply_buf);
862 ofpbuf_set_size(&tmp_reply, sizeof reply_buf);
863 if (nl_msg_nlmsgerr(&tmp_reply, &txn->error)) {
864 if (txn->reply) {
865 ofpbuf_clear(txn->reply);
866 }
867 if (txn->error) {
868 VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
869                             txn->error, ovs_strerror(txn->error));
870 }
871 } else {
872 txn->error = 0;
873 if (txn->reply) {
874 /* Copy the reply to the buffer specified by the caller. */
875 if (reply_len > txn->reply->allocated) {
876 ofpbuf_reinit(txn->reply, reply_len);
877 }
878 memcpy(ofpbuf_data(txn->reply), reply_buf, reply_len);
879 ofpbuf_set_size(txn->reply, reply_len);
880 }
881 }
882 ofpbuf_uninit(&tmp_reply);
883
884 /* Count the number of successful transactions. */
885 (*done)++;
886
887 }
888
889 if (!error) {
890 COVERAGE_ADD(netlink_sent, n);
891 }
892 #endif
893
894 return error;
895 }
896
897 static void
898 nl_sock_transact_multiple(struct nl_sock *sock,
899 struct nl_transaction **transactions, size_t n)
900 {
901 int max_batch_count;
902 int error;
903
904 if (!n) {
905 return;
906 }
907
908 /* In theory, every request could have a 64 kB reply. But the default and
909 * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
910 * be a bit below 128 kB, so that would only allow a single message in a
911 * "batch". So we assume that replies average (at most) 4 kB, which allows
912 * a good deal of batching.
913 *
914 * In practice, most of the requests that we batch either have no reply at
915 * all or a brief reply. */
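    /* For example: with the 1 MB receive buffer requested in
     * nl_sock_create(), 1048576 / 4096 gives 256 requests per batch, which
     * MIN() below then clamps to 'max_iovs' (at most MAX_IOVS, 128). The
     * kernel may grant a smaller 'rcvbuf', in which case the batch is
     * smaller too. */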
916 max_batch_count = MAX(sock->rcvbuf / 4096, 1);
917 max_batch_count = MIN(max_batch_count, max_iovs);
918
919 while (n > 0) {
920 size_t count, bytes;
921 size_t done;
922
923 /* Batch up to 'max_batch_count' transactions. But cap it at about a
924 * page of requests total because big skbuffs are expensive to
925 * allocate in the kernel. */
926 #if defined(PAGESIZE)
927 enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
928 #else
929 enum { MAX_BATCH_BYTES = 4096 - 512 };
930 #endif
931 bytes = ofpbuf_size(transactions[0]->request);
932 for (count = 1; count < n && count < max_batch_count; count++) {
933 if (bytes + ofpbuf_size(transactions[count]->request) > MAX_BATCH_BYTES) {
934 break;
935 }
936 bytes += ofpbuf_size(transactions[count]->request);
937 }
938
939 error = nl_sock_transact_multiple__(sock, transactions, count, &done);
940 transactions += done;
941 n -= done;
942
943 if (error == ENOBUFS) {
944 VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
945 } else if (error) {
946 VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
947 nl_sock_record_errors__(transactions, n, error);
948 }
949 }
950 }
951
952 static int
953 nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
954 struct ofpbuf **replyp)
955 {
956 struct nl_transaction *transactionp;
957 struct nl_transaction transaction;
958
959 transaction.request = CONST_CAST(struct ofpbuf *, request);
960 transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
961 transactionp = &transaction;
962
963 nl_sock_transact_multiple(sock, &transactionp, 1);
964
965 if (replyp) {
966 if (transaction.error) {
967 ofpbuf_delete(transaction.reply);
968 *replyp = NULL;
969 } else {
970 *replyp = transaction.reply;
971 }
972 }
973
974 return transaction.error;
975 }
976
977 /* Drain all the messages currently in 'sock''s receive queue. */
978 int
979 nl_sock_drain(struct nl_sock *sock)
980 {
981 #ifdef _WIN32
982 return 0;
983 #else
984 return drain_rcvbuf(sock->fd);
985 #endif
986 }
987
988 /* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
989 * Netlink socket created with the given 'protocol', and initializes 'dump' to
990 * reflect the state of the operation.
991 *
992 * 'request' must contain a Netlink message. Before sending the message,
993 * nlmsg_len will be finalized to match request->size, and nlmsg_pid will be
994 * set to the Netlink socket's pid. NLM_F_DUMP and NLM_F_ACK will be set in
995 * nlmsg_flags.
996 *
997 * The design of this Netlink socket library ensures that the dump is reliable.
998 *
999 * This function provides no status indication. nl_dump_done() provides an
1000 * error status for the entire dump operation.
1001 *
1002 * The caller must eventually destroy 'request'.
1003 */
1004 void
1005 nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
1006 {
1007 nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
1008
1009 ovs_mutex_init(&dump->mutex);
1010 ovs_mutex_lock(&dump->mutex);
1011 dump->status = nl_pool_alloc(protocol, &dump->sock);
1012 if (!dump->status) {
1013 dump->status = nl_sock_send__(dump->sock, request,
1014 nl_sock_allocate_seq(dump->sock, 1),
1015 true);
1016 }
1017 dump->nl_seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
1018 ovs_mutex_unlock(&dump->mutex);
1019 }
1020
1021 static int
1022 nl_dump_refill(struct nl_dump *dump, struct ofpbuf *buffer)
1023 OVS_REQUIRES(dump->mutex)
1024 {
1025 struct nlmsghdr *nlmsghdr;
1026 int error;
1027
1028 while (!ofpbuf_size(buffer)) {
1029 error = nl_sock_recv__(dump->sock, buffer, false);
1030 if (error) {
1031 /* The kernel never blocks providing the results of a dump, so
1032 * error == EAGAIN means that we've read the whole thing, and
1033 * therefore transform it into EOF. (The kernel always provides
1034 * NLMSG_DONE as a sentinel. Some other thread must have received
1035 * that already but not yet signaled it in 'status'.)
1036 *
1037 * Any other error is just an error. */
1038 return error == EAGAIN ? EOF : error;
1039 }
1040
1041 nlmsghdr = nl_msg_nlmsghdr(buffer);
1042 if (dump->nl_seq != nlmsghdr->nlmsg_seq) {
1043 VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
1044 nlmsghdr->nlmsg_seq, dump->nl_seq);
1045 ofpbuf_clear(buffer);
1046 }
1047 }
1048
1049 if (nl_msg_nlmsgerr(buffer, &error) && error) {
1050 VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
1051 ovs_strerror(error));
1052 ofpbuf_clear(buffer);
1053 return error;
1054 }
1055
1056 return 0;
1057 }
1058
1059 static int
1060 nl_dump_next__(struct ofpbuf *reply, struct ofpbuf *buffer)
1061 {
1062 struct nlmsghdr *nlmsghdr = nl_msg_next(buffer, reply);
1063 if (!nlmsghdr) {
1064 VLOG_WARN_RL(&rl, "netlink dump contains message fragment");
1065 return EPROTO;
1066 } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
1067 return EOF;
1068 } else {
1069 return 0;
1070 }
1071 }
1072
1073 /* Attempts to retrieve another reply from 'dump' into 'buffer'. 'dump' must
1074 * have been initialized with nl_dump_start(), and 'buffer' must have been
1075 * initialized. 'buffer' should be at least NL_DUMP_BUFSIZE bytes long.
1076 *
1077 * If successful, returns true and points 'reply->data' and
1078 * 'ofpbuf_size(reply)' to the message that was retrieved. The caller must not
1079 * modify 'reply' (because it points within 'buffer', which will be used by
1080 * future calls to this function).
1081 *
1082 * On failure, returns false and sets 'reply->data' to NULL and
1083 * 'ofpbuf_size(reply)' to 0. Failure might indicate an actual error or merely
1084 * the end of replies. An error status for the entire dump operation is
1085 * provided when it is completed by calling nl_dump_done().
1086 *
1087 * Multiple threads may call this function, passing the same nl_dump, however
1088 * each must provide independent buffers. This function may cache multiple
1089 * replies in the buffer, and these will be processed before more replies are
1090 * fetched. When this function returns false, other threads may continue to
1091 * process replies in their buffers, but they will not fetch more replies.
1092 */
1093 bool
1094 nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply, struct ofpbuf *buffer)
1095 {
1096 int retval = 0;
1097
1098 /* If the buffer is empty, refill it.
1099 *
1100 * If the buffer is not empty, we don't check the dump's status.
1101 * Otherwise, we could end up skipping some of the dump results if thread A
1102 * hits EOF while thread B is in the midst of processing a batch. */
1103 if (!ofpbuf_size(buffer)) {
1104 ovs_mutex_lock(&dump->mutex);
1105 if (!dump->status) {
1106 /* Take the mutex here to avoid an in-kernel race. If two threads
1107 * try to read from a Netlink dump socket at once, then the socket
1108 * error can be set to EINVAL, which will be encountered on the
1109 * next recv on that socket, which could be anywhere due to the way
1110 * that we pool Netlink sockets. Serializing the recv calls avoids
1111 * the issue. */
1112 dump->status = nl_dump_refill(dump, buffer);
1113 }
1114 retval = dump->status;
1115 ovs_mutex_unlock(&dump->mutex);
1116 }
1117
1118 /* Fetch the next message from the buffer. */
1119 if (!retval) {
1120 retval = nl_dump_next__(reply, buffer);
1121 if (retval) {
1122 /* Record 'retval' as the dump status, but don't overwrite an error
1123 * with EOF. */
1124 ovs_mutex_lock(&dump->mutex);
1125 if (dump->status <= 0) {
1126 dump->status = retval;
1127 }
1128 ovs_mutex_unlock(&dump->mutex);
1129 }
1130 }
1131
1132 if (retval) {
1133 ofpbuf_set_data(reply, NULL);
1134 ofpbuf_set_size(reply, 0);
1135 }
1136 return !retval;
1137 }
1138
1139 /* Completes Netlink dump operation 'dump', which must have been initialized
1140 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
1141 * otherwise a positive errno value describing the problem. */
1142 int
1143 nl_dump_done(struct nl_dump *dump)
1144 {
1145 int status;
1146
1147 ovs_mutex_lock(&dump->mutex);
1148 status = dump->status;
1149 ovs_mutex_unlock(&dump->mutex);
1150
1151 /* Drain any remaining messages that the client didn't read. Otherwise the
1152 * kernel will continue to queue them up and waste buffer space.
1153 *
1154 * XXX We could just destroy and discard the socket in this case. */
1155 if (!status) {
1156 uint64_t tmp_reply_stub[NL_DUMP_BUFSIZE / 8];
1157 struct ofpbuf reply, buf;
1158
1159 ofpbuf_use_stub(&buf, tmp_reply_stub, sizeof tmp_reply_stub);
1160 while (nl_dump_next(dump, &reply, &buf)) {
1161 /* Nothing to do. */
1162 }
1163 ofpbuf_uninit(&buf);
1164
1165 ovs_mutex_lock(&dump->mutex);
1166 status = dump->status;
1167 ovs_mutex_unlock(&dump->mutex);
1168 ovs_assert(status);
1169 }
1170
1171 nl_pool_release(dump->sock);
1172 ovs_mutex_destroy(&dump->mutex);
1173
1174 return status == EOF ? 0 : status;
1175 }
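
/* Dump sketch (illustrative only): a complete dump, using the same loop shape
 * as the drain code in nl_dump_done() above. 'request' is assumed to already
 * contain a valid Netlink message for whatever is being dumped:
 *
 *     uint64_t buf_stub[NL_DUMP_BUFSIZE / 8];
 *     struct ofpbuf buf, reply;
 *     struct nl_dump dump;
 *     int error;
 *
 *     nl_dump_start(&dump, NETLINK_GENERIC, &request);
 *     ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
 *     while (nl_dump_next(&dump, &reply, &buf)) {
 *         ...process one reply message in 'reply'...
 *     }
 *     ofpbuf_uninit(&buf);
 *     error = nl_dump_done(&dump);
 */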
1176
1177 #ifdef _WIN32
1178 /* Pends an I/O request in the driver. The driver completes the I/O whenever
1179  * an event or a packet is ready to be read. Once the I/O is completed, the
1180  * event in the overlapped structure associated with the pending I/O is
1181  * signaled. */
1182 static int
1183 pend_io_request(struct nl_sock *sock)
1184 {
1185 struct ofpbuf request;
1186 uint64_t request_stub[128];
1187 struct ovs_header *ovs_header;
1188 struct nlmsghdr *nlmsg;
1189 uint32_t seq;
1190 int retval;
1191 int error;
1192 DWORD bytes;
1193 OVERLAPPED *overlapped = CONST_CAST(OVERLAPPED *, &sock->overlapped);
1194
1195 int ovs_msg_size = sizeof (struct nlmsghdr) + sizeof (struct genlmsghdr) +
1196 sizeof (struct ovs_header);
1197
1198 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
1199
1200 seq = nl_sock_allocate_seq(sock, 1);
1201 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
1202 OVS_CTRL_CMD_WIN_PEND_REQ, OVS_WIN_CONTROL_VERSION);
1203 nlmsg = nl_msg_nlmsghdr(&request);
1204 nlmsg->nlmsg_seq = seq;
1205
1206 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
1207 ovs_header->dp_ifindex = 0;
1208
1209 if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
1210 ofpbuf_data(&request), ofpbuf_size(&request),
1211 NULL, 0, &bytes, overlapped)) {
1212 error = GetLastError();
1213 /* Check if the I/O got pended */
1214 if (error != ERROR_IO_INCOMPLETE && error != ERROR_IO_PENDING) {
1215 VLOG_ERR("nl_sock_wait failed - %s\n", ovs_format_message(error));
1216 retval = EINVAL;
1217 goto done;
1218 }
1219 } else {
1220 /* The I/O was completed synchronously */
1221 poll_immediate_wake();
1222 }
1223 retval = 0;
1224
1225 done:
1226 ofpbuf_uninit(&request);
1227 return retval;
1228 }
1229 #endif /* _WIN32 */
1230
1231 /* Causes poll_block() to wake up when any of the specified 'events' (which is
1232  * an OR'd combination of POLLIN, POLLOUT, etc.) occurs on 'sock'.
1233  * On Windows, 'sock' is not treated as const and may be modified. */
1234 void
1235 nl_sock_wait(const struct nl_sock *sock, short int events)
1236 {
1237 #ifdef _WIN32
1238 if (sock->overlapped.Internal != STATUS_PENDING) {
1239 pend_io_request(CONST_CAST(struct nl_sock *, sock));
1240 /* XXX: poll_wevent_wait(sock->overlapped.hEvent); */
1241 }
1242 poll_immediate_wake(); /* XXX: temporary. */
1243 #else
1244 poll_fd_wait(sock->fd, events);
1245 #endif
1246 }
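
/* Poll-loop sketch (illustrative only): the pattern this function supports,
 * with poll_block() from poll-loop.h (included above); 'buf' is assumed to be
 * an initialized ofpbuf:
 *
 *     int error = nl_sock_recv(sock, &buf, false);
 *     if (error == EAGAIN) {
 *         nl_sock_wait(sock, POLLIN);
 *         poll_block();
 *     }
 */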
1247
1248 /* Returns the underlying fd for 'sock', for use in "poll()"-like operations
1249 * that can't use nl_sock_wait().
1250 *
1251 * It's a little tricky to use the returned fd correctly, because nl_sock does
1252 * "copy on write" to allow a single nl_sock to be used for notifications,
1253 * transactions, and dumps. If 'sock' is used only for notifications and
1254 * transactions (and never for dump) then the usage is safe. */
1255 int
1256 nl_sock_fd(const struct nl_sock *sock)
1257 {
1258 #ifdef _WIN32
1259 BUILD_ASSERT_DECL(sizeof sock->handle == sizeof(int));
1260 return (int)sock->handle;
1261 #else
1262 return sock->fd;
1263 #endif
1264 }
1265
1266 /* Returns the PID associated with this socket. */
1267 uint32_t
1268 nl_sock_pid(const struct nl_sock *sock)
1269 {
1270 return sock->pid;
1271 }
1272 \f
1273 /* Miscellaneous. */
1274
1275 struct genl_family {
1276 struct hmap_node hmap_node;
1277 uint16_t id;
1278 char *name;
1279 };
1280
1281 static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);
1282
1283 static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
1284 [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
1285 [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
1286 };
1287
1288 static struct genl_family *
1289 find_genl_family_by_id(uint16_t id)
1290 {
1291 struct genl_family *family;
1292
1293 HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
1294 &genl_families) {
1295 if (family->id == id) {
1296 return family;
1297 }
1298 }
1299 return NULL;
1300 }
1301
1302 static void
1303 define_genl_family(uint16_t id, const char *name)
1304 {
1305 struct genl_family *family = find_genl_family_by_id(id);
1306
1307 if (family) {
1308 if (!strcmp(family->name, name)) {
1309 return;
1310 }
1311 free(family->name);
1312 } else {
1313 family = xmalloc(sizeof *family);
1314 family->id = id;
1315 hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
1316 }
1317 family->name = xstrdup(name);
1318 }
1319
1320 static const char *
1321 genl_family_to_name(uint16_t id)
1322 {
1323 if (id == GENL_ID_CTRL) {
1324 return "control";
1325 } else {
1326 struct genl_family *family = find_genl_family_by_id(id);
1327 return family ? family->name : "unknown";
1328 }
1329 }
1330
1331 #ifndef _WIN32
1332 static int
1333 do_lookup_genl_family(const char *name, struct nlattr **attrs,
1334 struct ofpbuf **replyp)
1335 {
1336 struct nl_sock *sock;
1337 struct ofpbuf request, *reply;
1338 int error;
1339
1340 *replyp = NULL;
1341 error = nl_sock_create(NETLINK_GENERIC, &sock);
1342 if (error) {
1343 return error;
1344 }
1345
1346 ofpbuf_init(&request, 0);
1347 nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
1348 CTRL_CMD_GETFAMILY, 1);
1349 nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
1350 error = nl_sock_transact(sock, &request, &reply);
1351 ofpbuf_uninit(&request);
1352 if (error) {
1353 nl_sock_destroy(sock);
1354 return error;
1355 }
1356
1357 if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
1358 family_policy, attrs, ARRAY_SIZE(family_policy))
1359 || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
1360 nl_sock_destroy(sock);
1361 ofpbuf_delete(reply);
1362 return EPROTO;
1363 }
1364
1365 nl_sock_destroy(sock);
1366 *replyp = reply;
1367 return 0;
1368 }
1369 #else
1370 static int
1371 do_lookup_genl_family(const char *name, struct nlattr **attrs,
1372 struct ofpbuf **replyp)
1373 {
1374 struct nlmsghdr *nlmsg;
1375 struct ofpbuf *reply;
1376 int error;
1377 uint16_t family_id;
1378 const char *family_name;
1379 uint32_t family_version;
1380 uint32_t family_attrmax;
1381 uint32_t mcgrp_id = OVS_WIN_NL_INVALID_MCGRP_ID;
1382 const char *mcgrp_name = NULL;
1383
1384 *replyp = NULL;
1385 reply = ofpbuf_new(1024);
1386
1387 /* CTRL_ATTR_MCAST_GROUPS is supported only for VPORT family. */
1388 if (!strcmp(name, OVS_WIN_CONTROL_FAMILY)) {
1389 family_id = OVS_WIN_NL_CTRL_FAMILY_ID;
1390 family_name = OVS_WIN_CONTROL_FAMILY;
1391 family_version = OVS_WIN_CONTROL_VERSION;
1392 family_attrmax = OVS_WIN_CONTROL_ATTR_MAX;
1393 } else if (!strcmp(name, OVS_DATAPATH_FAMILY)) {
1394 family_id = OVS_WIN_NL_DATAPATH_FAMILY_ID;
1395 family_name = OVS_DATAPATH_FAMILY;
1396 family_version = OVS_DATAPATH_VERSION;
1397 family_attrmax = OVS_DP_ATTR_MAX;
1398 } else if (!strcmp(name, OVS_PACKET_FAMILY)) {
1399 family_id = OVS_WIN_NL_PACKET_FAMILY_ID;
1400 family_name = OVS_PACKET_FAMILY;
1401 family_version = OVS_PACKET_VERSION;
1402 family_attrmax = OVS_PACKET_ATTR_MAX;
1403 } else if (!strcmp(name, OVS_VPORT_FAMILY)) {
1404 family_id = OVS_WIN_NL_VPORT_FAMILY_ID;
1405 family_name = OVS_VPORT_FAMILY;
1406 family_version = OVS_VPORT_VERSION;
1407 family_attrmax = OVS_VPORT_ATTR_MAX;
1408 mcgrp_id = OVS_WIN_NL_VPORT_MCGRP_ID;
1409 mcgrp_name = OVS_VPORT_MCGROUP;
1410 } else if (!strcmp(name, OVS_FLOW_FAMILY)) {
1411 family_id = OVS_WIN_NL_FLOW_FAMILY_ID;
1412 family_name = OVS_FLOW_FAMILY;
1413 family_version = OVS_FLOW_VERSION;
1414 family_attrmax = OVS_FLOW_ATTR_MAX;
1415 } else if (!strcmp(name, OVS_WIN_NETDEV_FAMILY)) {
1416 family_id = OVS_WIN_NL_NETDEV_FAMILY_ID;
1417 family_name = OVS_WIN_NETDEV_FAMILY;
1418 family_version = OVS_WIN_NETDEV_VERSION;
1419 family_attrmax = OVS_WIN_NETDEV_ATTR_MAX;
1420 } else {
1421 ofpbuf_delete(reply);
1422 return EINVAL;
1423 }
1424
1425 nl_msg_put_genlmsghdr(reply, 0, GENL_ID_CTRL, 0,
1426 CTRL_CMD_NEWFAMILY, family_version);
1427 /* CTRL_ATTR_HDRSIZE and CTRL_ATTR_OPS are not populated, but the
1428 * callers do not seem to need them. */
1429 nl_msg_put_u16(reply, CTRL_ATTR_FAMILY_ID, family_id);
1430 nl_msg_put_string(reply, CTRL_ATTR_FAMILY_NAME, family_name);
1431 nl_msg_put_u32(reply, CTRL_ATTR_VERSION, family_version);
1432 nl_msg_put_u32(reply, CTRL_ATTR_MAXATTR, family_attrmax);
1433
1434 if (mcgrp_id != OVS_WIN_NL_INVALID_MCGRP_ID) {
1435 size_t mcgrp_ofs1 = nl_msg_start_nested(reply, CTRL_ATTR_MCAST_GROUPS);
1436         size_t mcgrp_ofs2 = nl_msg_start_nested(reply,
1437 OVS_WIN_NL_VPORT_MCGRP_ID - OVS_WIN_NL_MCGRP_START_ID);
1438 nl_msg_put_u32(reply, CTRL_ATTR_MCAST_GRP_ID, mcgrp_id);
1439 ovs_assert(mcgrp_name != NULL);
1440 nl_msg_put_string(reply, CTRL_ATTR_MCAST_GRP_NAME, mcgrp_name);
1441 nl_msg_end_nested(reply, mcgrp_ofs2);
1442 nl_msg_end_nested(reply, mcgrp_ofs1);
1443 }
1444
1445 /* Set the total length of the netlink message. */
1446 nlmsg = nl_msg_nlmsghdr(reply);
1447 nlmsg->nlmsg_len = ofpbuf_size(reply);
1448
1449 if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
1450 family_policy, attrs, ARRAY_SIZE(family_policy))
1451 || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
1452 ofpbuf_delete(reply);
1453 return EPROTO;
1454 }
1455
1456 *replyp = reply;
1457 return 0;
1458 }
1459 #endif
1460
1461 /* Finds the multicast group called 'group_name' in genl family 'family_name'.
1462 * When successful, writes its result to 'multicast_group' and returns 0.
1463 * Otherwise, clears 'multicast_group' and returns a positive error code.
1464 */
1465 int
1466 nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
1467 unsigned int *multicast_group)
1468 {
1469 struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
1470 const struct nlattr *mc;
1471 struct ofpbuf *reply;
1472 unsigned int left;
1473 int error;
1474
1475 *multicast_group = 0;
1476 error = do_lookup_genl_family(family_name, family_attrs, &reply);
1477 if (error) {
1478 return error;
1479 }
1480
1481 if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
1482 error = EPROTO;
1483 goto exit;
1484 }
1485
1486 NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
1487 static const struct nl_policy mc_policy[] = {
1488 [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
1489 [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
1490 };
1491
1492 struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
1493 const char *mc_name;
1494
1495 if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
1496 error = EPROTO;
1497 goto exit;
1498 }
1499
1500 mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
1501 if (!strcmp(group_name, mc_name)) {
1502 *multicast_group =
1503 nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
1504 error = 0;
1505 goto exit;
1506 }
1507 }
1508 error = EPROTO;
1509
1510 exit:
1511 ofpbuf_delete(reply);
1512 return error;
1513 }
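
/* Lookup sketch (illustrative only): finding a multicast group and then
 * joining it. OVS_VPORT_FAMILY and OVS_VPORT_MCGROUP are the names used
 * elsewhere in this file; any Generic Netlink family/group pair works:
 *
 *     unsigned int group;
 *
 *     if (!nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
 *                                 &group)) {
 *         nl_sock_join_mcgroup(sock, group);
 *     }
 */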
1514
1515 /* If '*number' is 0, translates the given Generic Netlink family 'name' to a
1516 * number and stores it in '*number'. If successful, returns 0 and the caller
1517 * may use '*number' as the family number. On failure, returns a positive
1518  * errno value and '*number' caches the negated errno value. */
1519 int
1520 nl_lookup_genl_family(const char *name, int *number)
1521 {
1522 if (*number == 0) {
1523 struct nlattr *attrs[ARRAY_SIZE(family_policy)];
1524 struct ofpbuf *reply;
1525 int error;
1526
1527 error = do_lookup_genl_family(name, attrs, &reply);
1528 if (!error) {
1529 *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
1530 define_genl_family(*number, name);
1531 } else {
1532 *number = -error;
1533 }
1534 ofpbuf_delete(reply);
1535
1536 ovs_assert(*number != 0);
1537 }
1538 return *number > 0 ? 0 : -*number;
1539 }
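
/* Lookup sketch (illustrative only): the caching pattern this function is
 * designed for. The static cache variable here is hypothetical:
 *
 *     static int ovs_datapath_family = 0;
 *     int error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
 *                                       &ovs_datapath_family);
 *     if (!error) {
 *         ...use 'ovs_datapath_family' as the nlmsg_type of requests...
 *     }
 */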
1540 \f
1541 struct nl_pool {
1542 struct nl_sock *socks[16];
1543 int n;
1544 };
1545
1546 static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
1547 static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
1548
1549 static int
1550 nl_pool_alloc(int protocol, struct nl_sock **sockp)
1551 {
1552 struct nl_sock *sock = NULL;
1553 struct nl_pool *pool;
1554
1555 ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
1556
1557 ovs_mutex_lock(&pool_mutex);
1558 pool = &pools[protocol];
1559 if (pool->n > 0) {
1560 sock = pool->socks[--pool->n];
1561 }
1562 ovs_mutex_unlock(&pool_mutex);
1563
1564 if (sock) {
1565 *sockp = sock;
1566 return 0;
1567 } else {
1568 return nl_sock_create(protocol, sockp);
1569 }
1570 }
1571
1572 static void
1573 nl_pool_release(struct nl_sock *sock)
1574 {
1575 if (sock) {
1576 struct nl_pool *pool = &pools[sock->protocol];
1577
1578 ovs_mutex_lock(&pool_mutex);
1579 if (pool->n < ARRAY_SIZE(pool->socks)) {
1580 pool->socks[pool->n++] = sock;
1581 sock = NULL;
1582 }
1583 ovs_mutex_unlock(&pool_mutex);
1584
1585 nl_sock_destroy(sock);
1586 }
1587 }
1588
1589 /* Sends 'request' to the kernel on a Netlink socket for the given 'protocol'
1590 * (e.g. NETLINK_ROUTE or NETLINK_GENERIC) and waits for a response. If
1591 * successful, returns 0. On failure, returns a positive errno value.
1592 *
1593 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
1594 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
1595 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
1596 * reply, if any, is discarded.
1597 *
1598 * Before the message is sent, nlmsg_len in 'request' will be finalized to
1599  * match ofpbuf_size(request), nlmsg_pid will be set to the pid of the socket used
1600 * for sending the request, and nlmsg_seq will be initialized.
1601 *
1602 * The caller is responsible for destroying 'request'.
1603 *
1604 * Bare Netlink is an unreliable transport protocol. This function layers
1605 * reliable delivery and reply semantics on top of bare Netlink.
1606 *
1607 * In Netlink, sending a request to the kernel is reliable enough, because the
1608 * kernel will tell us if the message cannot be queued (and we will in that
1609 * case put it on the transmit queue and wait until it can be delivered).
1610 *
1611 * Receiving the reply is the real problem: if the socket buffer is full when
1612 * the kernel tries to send the reply, the reply will be dropped. However, the
1613 * kernel sets a flag that a reply has been dropped. The next call to recv
1614 * then returns ENOBUFS. We can then re-send the request.
1615 *
1616 * Caveats:
1617 *
1618 * 1. Netlink depends on sequence numbers to match up requests and
1619 * replies. The sender of a request supplies a sequence number, and
1620 * the reply echos back that sequence number.
1621 *
1622 * This is fine, but (1) some kernel netlink implementations are
1623 * broken, in that they fail to echo sequence numbers and (2) this
1624 * function will drop packets with non-matching sequence numbers, so
1625 * that only a single request can be usefully transacted at a time.
1626 *
1627 * 2. Resending the request causes it to be re-executed, so the request
1628 * needs to be idempotent.
1629 */
1630 int
1631 nl_transact(int protocol, const struct ofpbuf *request,
1632 struct ofpbuf **replyp)
1633 {
1634 struct nl_sock *sock;
1635 int error;
1636
1637 error = nl_pool_alloc(protocol, &sock);
1638 if (error) {
1639 *replyp = NULL;
1640 return error;
1641 }
1642
1643 error = nl_sock_transact(sock, request, replyp);
1644
1645 nl_pool_release(sock);
1646 return error;
1647 }
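
/* Transaction sketch (illustrative only): a single request/reply exchange.
 * The control-family query mirrors do_lookup_genl_family() above; the family
 * name string is only an example:
 *
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "ovs_datapath");
 *     error = nl_transact(NETLINK_GENERIC, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         ...parse 'reply', e.g. with nl_policy_parse()...
 *         ofpbuf_delete(reply);
 *     }
 */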
1648
1649 /* Sends the 'request' member of the 'n' transactions in 'transactions' on a
1650 * Netlink socket for the given 'protocol' (e.g. NETLINK_ROUTE or
1651 * NETLINK_GENERIC), in order, and receives responses to all of them. Fills in
1652 * the 'error' member of each transaction with 0 if it was successful,
1653 * otherwise with a positive errno value. If 'reply' is nonnull, then it will
1654 * be filled with the reply if the message receives a detailed reply. In other
1655 * cases, i.e. where the request failed or had no reply beyond an indication of
1656 * success, 'reply' will be cleared if it is nonnull.
1657 *
1658 * The caller is responsible for destroying each request and reply, and the
1659 * transactions array itself.
1660 *
1661 * Before sending each message, this function will finalize nlmsg_len in each
1662 * 'request' to match the ofpbuf's size, set nlmsg_pid to the pid of the socket
1663 * used for the transaction, and initialize nlmsg_seq.
1664 *
1665 * Bare Netlink is an unreliable transport protocol. This function layers
1666 * reliable delivery and reply semantics on top of bare Netlink. See
1667 * nl_transact() for some caveats.
1668 */
1669 void
1670 nl_transact_multiple(int protocol,
1671 struct nl_transaction **transactions, size_t n)
1672 {
1673 struct nl_sock *sock;
1674 int error;
1675
1676 error = nl_pool_alloc(protocol, &sock);
1677 if (!error) {
1678 nl_sock_transact_multiple(sock, transactions, n);
1679 nl_pool_release(sock);
1680 } else {
1681 nl_sock_record_errors__(transactions, n, error);
1682 }
1683 }
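
/* Batching sketch (illustrative only): issuing two requests at once. The
 * 'requests' array is hypothetical and is assumed to hold complete Netlink
 * messages; passing NULL for 'reply' discards any reply payload:
 *
 *     struct nl_transaction txns[2], *txnps[2];
 *     size_t i;
 *
 *     for (i = 0; i < 2; i++) {
 *         txns[i].request = requests[i];
 *         txns[i].reply = NULL;
 *         txnps[i] = &txns[i];
 *     }
 *     nl_transact_multiple(NETLINK_GENERIC, txnps, 2);
 *     for (i = 0; i < 2; i++) {
 *         if (txns[i].error) {
 *             VLOG_WARN("request %"PRIuSIZE" failed (%s)",
 *                       i, ovs_strerror(txns[i].error));
 *         }
 *     }
 */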
1684
1685 \f
1686 static uint32_t
1687 nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
1688 {
1689 uint32_t seq = sock->next_seq;
1690
1691 sock->next_seq += n;
1692
1693 /* Make it impossible for the next request for sequence numbers to wrap
1694 * around to 0. Start over with 1 to avoid ever using a sequence number of
1695 * 0, because the kernel uses sequence number 0 for notifications. */
1696 if (sock->next_seq >= UINT32_MAX / 2) {
1697 sock->next_seq = 1;
1698 }
1699
1700 return seq;
1701 }
1702
1703 static void
1704 nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
1705 {
1706 struct nlmsg_flag {
1707 unsigned int bits;
1708 const char *name;
1709 };
1710 static const struct nlmsg_flag flags[] = {
1711 { NLM_F_REQUEST, "REQUEST" },
1712 { NLM_F_MULTI, "MULTI" },
1713 { NLM_F_ACK, "ACK" },
1714 { NLM_F_ECHO, "ECHO" },
1715 { NLM_F_DUMP, "DUMP" },
1716 { NLM_F_ROOT, "ROOT" },
1717 { NLM_F_MATCH, "MATCH" },
1718 { NLM_F_ATOMIC, "ATOMIC" },
1719 };
1720 const struct nlmsg_flag *flag;
1721 uint16_t flags_left;
1722
1723 ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
1724 h->nlmsg_len, h->nlmsg_type);
1725 if (h->nlmsg_type == NLMSG_NOOP) {
1726 ds_put_cstr(ds, "(no-op)");
1727 } else if (h->nlmsg_type == NLMSG_ERROR) {
1728 ds_put_cstr(ds, "(error)");
1729 } else if (h->nlmsg_type == NLMSG_DONE) {
1730 ds_put_cstr(ds, "(done)");
1731 } else if (h->nlmsg_type == NLMSG_OVERRUN) {
1732 ds_put_cstr(ds, "(overrun)");
1733 } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
1734 ds_put_cstr(ds, "(reserved)");
1735 } else if (protocol == NETLINK_GENERIC) {
1736 ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
1737 } else {
1738 ds_put_cstr(ds, "(family-defined)");
1739 }
1740 ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
1741 flags_left = h->nlmsg_flags;
1742 for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
1743 if ((flags_left & flag->bits) == flag->bits) {
1744 ds_put_format(ds, "[%s]", flag->name);
1745 flags_left &= ~flag->bits;
1746 }
1747 }
1748 if (flags_left) {
1749 ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
1750 }
1751 ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
1752 h->nlmsg_seq, h->nlmsg_pid);
1753 }
1754
1755 static char *
1756 nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
1757 {
1758 struct ds ds = DS_EMPTY_INITIALIZER;
1759 const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
1760 if (h) {
1761 nlmsghdr_to_string(h, protocol, &ds);
1762 if (h->nlmsg_type == NLMSG_ERROR) {
1763 const struct nlmsgerr *e;
1764 e = ofpbuf_at(buffer, NLMSG_HDRLEN,
1765 NLMSG_ALIGN(sizeof(struct nlmsgerr)));
1766 if (e) {
1767 ds_put_format(&ds, " error(%d", e->error);
1768 if (e->error < 0) {
1769 ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
1770 }
1771 ds_put_cstr(&ds, ", in-reply-to(");
1772 nlmsghdr_to_string(&e->msg, protocol, &ds);
1773 ds_put_cstr(&ds, "))");
1774 } else {
1775 ds_put_cstr(&ds, " error(truncated)");
1776 }
1777 } else if (h->nlmsg_type == NLMSG_DONE) {
1778 int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
1779 if (error) {
1780 ds_put_format(&ds, " done(%d", *error);
1781 if (*error < 0) {
1782 ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
1783 }
1784 ds_put_cstr(&ds, ")");
1785 } else {
1786 ds_put_cstr(&ds, " done(truncated)");
1787 }
1788 } else if (protocol == NETLINK_GENERIC) {
1789 struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
1790 if (genl) {
1791 ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
1792 genl->cmd, genl->version);
1793 }
1794 }
1795 } else {
1796 ds_put_cstr(&ds, "nl(truncated)");
1797 }
1798 return ds.string;
1799 }
1800
1801 static void
1802 log_nlmsg(const char *function, int error,
1803 const void *message, size_t size, int protocol)
1804 {
1805 struct ofpbuf buffer;
1806 char *nlmsg;
1807
1808 if (!VLOG_IS_DBG_ENABLED()) {
1809 return;
1810 }
1811
1812 ofpbuf_use_const(&buffer, message, size);
1813 nlmsg = nlmsg_to_string(&buffer, protocol);
1814 VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
1815 free(nlmsg);
1816 }