lib/netlink-socket.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netlink-socket.h"
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <stdlib.h>
22 #include <sys/types.h>
23 #include <sys/uio.h>
24 #include <unistd.h>
25 #include "coverage.h"
26 #include "dynamic-string.h"
27 #include "hash.h"
28 #include "hmap.h"
29 #include "netlink.h"
30 #include "netlink-protocol.h"
31 #include "odp-netlink.h"
32 #include "ofpbuf.h"
33 #include "ovs-thread.h"
34 #include "poll-loop.h"
35 #include "seq.h"
36 #include "socket-util.h"
37 #include "util.h"
38 #include "openvswitch/vlog.h"
39
40 VLOG_DEFINE_THIS_MODULE(netlink_socket);
41
42 COVERAGE_DEFINE(netlink_overflow);
43 COVERAGE_DEFINE(netlink_received);
44 COVERAGE_DEFINE(netlink_recv_jumbo);
45 COVERAGE_DEFINE(netlink_sent);
46
47 /* Linux header file confusion causes this to be undefined. */
48 #ifndef SOL_NETLINK
49 #define SOL_NETLINK 270
50 #endif
51
52 /* A single (bad) Netlink message can in theory dump out many, many log
53 * messages, so the burst size is set quite high here to avoid missing useful
54 * information. Also, at high logging levels we log *all* Netlink messages. */
55 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);
56
57 static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
58 static void log_nlmsg(const char *function, int error,
59 const void *message, size_t size, int protocol);
60 #ifdef _WIN32
61 static int get_sock_pid_from_kernel(struct nl_sock *sock);
62 #endif
63 \f
64 /* Netlink sockets. */
65
66 struct nl_sock {
67 #ifdef _WIN32
68 HANDLE handle;
69 OVERLAPPED overlapped;
70 DWORD read_ioctl;
71 #else
72 int fd;
73 #endif
74 uint32_t next_seq;
75 uint32_t pid;
76 int protocol;
77 unsigned int rcvbuf; /* Receive buffer size (SO_RCVBUF). */
78 };
79
80 /* Compile-time limit on iovecs, so that we can allocate a maximum-size array
81 * of iovecs on the stack. */
82 #define MAX_IOVS 128
83
84 /* Maximum number of iovecs that may be passed to sendmsg, clamped to at
85 * least _XOPEN_IOV_MAX (16) and at most MAX_IOVS.
86 *
87 * Initialized by nl_sock_create(). */
88 static int max_iovs;
89
90 static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
91 static void nl_pool_release(struct nl_sock *);
92
93 /* Creates a new netlink socket for the given netlink 'protocol'
94 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
95 * new socket if successful, otherwise returns a positive errno value. */
96 int
97 nl_sock_create(int protocol, struct nl_sock **sockp)
98 {
99 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
100 struct nl_sock *sock;
101 #ifndef _WIN32
102 struct sockaddr_nl local, remote;
103 #endif
104 socklen_t local_size;
105 int rcvbuf;
106 int retval = 0;
107
108 if (ovsthread_once_start(&once)) {
109 int save_errno = errno;
110 errno = 0;
111
112 max_iovs = sysconf(_SC_UIO_MAXIOV);
113 if (max_iovs < _XOPEN_IOV_MAX) {
114 if (max_iovs == -1 && errno) {
115 VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
116 }
117 max_iovs = _XOPEN_IOV_MAX;
118 } else if (max_iovs > MAX_IOVS) {
119 max_iovs = MAX_IOVS;
120 }
121
122 errno = save_errno;
123 ovsthread_once_done(&once);
124 }
125
126 *sockp = NULL;
127 sock = xmalloc(sizeof *sock);
128
129 #ifdef _WIN32
130 sock->handle = CreateFile(OVS_DEVICE_NAME_USER,
131 GENERIC_READ | GENERIC_WRITE,
132 FILE_SHARE_READ | FILE_SHARE_WRITE,
133 NULL, OPEN_EXISTING,
134 FILE_FLAG_OVERLAPPED, NULL);
135
136 if (sock->handle == INVALID_HANDLE_VALUE) {
137 VLOG_ERR("CreateFile: %s", ovs_lasterror_to_string());
138 goto error;
139 }
140
141 memset(&sock->overlapped, 0, sizeof sock->overlapped);
142 sock->overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
143 if (sock->overlapped.hEvent == NULL) {
144 VLOG_ERR("CreateEvent: %s", ovs_lasterror_to_string());
145 goto error;
146 }
147 /* Initialize the type/ioctl to Generic */
148 sock->read_ioctl = OVS_IOCTL_READ;
149 #else
150 sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
151 if (sock->fd < 0) {
152 VLOG_ERR("socket: %s", ovs_strerror(errno));
153 goto error;
154 }
155 #endif
156
157 sock->protocol = protocol;
158 sock->next_seq = 1;
159
160 rcvbuf = 1024 * 1024;
161 #ifdef _WIN32
162 sock->rcvbuf = rcvbuf;
163 retval = get_sock_pid_from_kernel(sock);
164 if (retval != 0) {
165 goto error;
166 }
167 #else
168 if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
169 &rcvbuf, sizeof rcvbuf)) {
170 /* Only root can use SO_RCVBUFFORCE. Everyone else gets EPERM.
171 * Warn only if the failure is therefore unexpected. */
172 if (errno != EPERM) {
173 VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
174 "(%s)", rcvbuf, ovs_strerror(errno));
175 }
176 }
177
178 retval = get_socket_rcvbuf(sock->fd);
179 if (retval < 0) {
180 retval = -retval;
181 goto error;
182 }
183 sock->rcvbuf = retval;
184 retval = 0;
185
186 /* Connect to kernel (pid 0) as remote address. */
187 memset(&remote, 0, sizeof remote);
188 remote.nl_family = AF_NETLINK;
189 remote.nl_pid = 0;
190 if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
191 VLOG_ERR("connect(0): %s", ovs_strerror(errno));
192 goto error;
193 }
194
195 /* Obtain pid assigned by kernel. */
196 local_size = sizeof local;
197 if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
198 VLOG_ERR("getsockname: %s", ovs_strerror(errno));
199 goto error;
200 }
201 if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
202 VLOG_ERR("getsockname returned bad Netlink name");
203 retval = EINVAL;
204 goto error;
205 }
206 sock->pid = local.nl_pid;
207 #endif
208
209 *sockp = sock;
210 return 0;
211
212 error:
213 if (retval == 0) {
214 retval = errno;
215 if (retval == 0) {
216 retval = EINVAL;
217 }
218 }
219 #ifdef _WIN32
220 if (sock->overlapped.hEvent) {
221 CloseHandle(sock->overlapped.hEvent);
222 }
223 if (sock->handle != INVALID_HANDLE_VALUE) {
224 CloseHandle(sock->handle);
225 }
226 #else
227 if (sock->fd >= 0) {
228 close(sock->fd);
229 }
230 #endif
231 free(sock);
232 return retval;
233 }
234
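/* Example (illustrative sketch, not part of the original file): a minimal
 * caller that opens a Generic Netlink socket, checks the result, and releases
 * the socket again.
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (error) {
 *         VLOG_ERR("failed to create netlink socket (%s)",
 *                  ovs_strerror(error));
 *     } else {
 *         ...use nl_sock_send(), nl_sock_recv(), etc. on 'sock'...
 *         nl_sock_destroy(sock);
 *     }
 */
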
235 /* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
236 * sets '*sockp' to the new socket if successful, otherwise returns a positive
237 * errno value. */
238 int
239 nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
240 {
241 return nl_sock_create(src->protocol, sockp);
242 }
243
244 /* Destroys netlink socket 'sock'. */
245 void
246 nl_sock_destroy(struct nl_sock *sock)
247 {
248 if (sock) {
249 #ifdef _WIN32
250 if (sock->overlapped.hEvent) {
251 CloseHandle(sock->overlapped.hEvent);
252 }
253 CloseHandle(sock->handle);
254 #else
255 close(sock->fd);
256 #endif
257 free(sock);
258 }
259 }
260
261 #ifdef _WIN32
262 /* Reads the pid that the kernel datapath generated for 'sock'. The function
263 * uses a separate IOCTL rather than transaction semantics to avoid unnecessary
264 * message overhead. */
265 static int
266 get_sock_pid_from_kernel(struct nl_sock *sock)
267 {
268 uint32_t pid = 0;
269 int retval = 0;
270 DWORD bytes = 0;
271
272 if (!DeviceIoControl(sock->handle, OVS_IOCTL_GET_PID,
273 NULL, 0, &pid, sizeof(pid),
274 &bytes, NULL)) {
275 retval = EINVAL;
276 } else {
277 if (bytes < sizeof(pid)) {
278 retval = EINVAL;
279 } else {
280 sock->pid = pid;
281 }
282 }
283
284 return retval;
285 }
286 #endif /* _WIN32 */
287
288 #ifdef _WIN32
289 static int __inline
290 nl_sock_mcgroup(struct nl_sock *sock, unsigned int multicast_group, bool join)
291 {
292 struct ofpbuf request;
293 uint64_t request_stub[128];
294 struct ovs_header *ovs_header;
295 struct nlmsghdr *nlmsg;
296 int error;
297
298 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
299
300 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
301 OVS_CTRL_CMD_MC_SUBSCRIBE_REQ,
302 OVS_WIN_CONTROL_VERSION);
303
304 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
305 ovs_header->dp_ifindex = 0;
306
307 nl_msg_put_u32(&request, OVS_NL_ATTR_MCAST_GRP, multicast_group);
308 nl_msg_put_u8(&request, OVS_NL_ATTR_MCAST_JOIN, join ? 1 : 0);
309
310 error = nl_sock_send(sock, &request, true);
311 ofpbuf_uninit(&request);
312 return error;
313 }
314 #endif
315 /* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
316 * successful, otherwise a positive errno value.
317 *
318 * A socket that is subscribed to a multicast group that receives asynchronous
319 * notifications must not be used for Netlink transactions or dumps, because
320 * transactions and dumps can cause notifications to be lost.
321 *
322 * Multicast group numbers are always positive.
323 *
324 * It is not an error to attempt to join a multicast group to which a socket
325 * already belongs. */
326 int
327 nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
328 {
329 #ifdef _WIN32
330 /* Set the socket type as a "multicast" socket */
331 sock->read_ioctl = OVS_IOCTL_READ_EVENT;
332 int error = nl_sock_mcgroup(sock, multicast_group, true);
333 if (error) {
334 sock->read_ioctl = OVS_IOCTL_READ;
335 VLOG_WARN("could not join multicast group %u (%s)",
336 multicast_group, ovs_strerror(error));
337 return error;
338 }
339 #else
340 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
341 &multicast_group, sizeof multicast_group) < 0) {
342 VLOG_WARN("could not join multicast group %u (%s)",
343 multicast_group, ovs_strerror(errno));
344 return errno;
345 }
346 #endif
347 return 0;
348 }
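
/* Example (illustrative sketch, not part of the original file): subscribing a
 * dedicated notification socket to a multicast group and unsubscribing again.
 * As noted above, 'notify_sock' should not also be used for transactions or
 * dumps; 'group' would typically come from nl_lookup_genl_mcgroup() below.
 *
 *     int error = nl_sock_join_mcgroup(notify_sock, group);
 *     if (!error) {
 *         ...read notifications with nl_sock_recv(notify_sock, &buf, false)...
 *         nl_sock_leave_mcgroup(notify_sock, group);
 *     }
 */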
349
350 #ifdef _WIN32
351 int
352 nl_sock_subscribe_packets(struct nl_sock *sock)
353 {
354 int error;
355
356 if (sock->read_ioctl != OVS_IOCTL_READ) {
357 return EINVAL;
358 }
359
360 error = nl_sock_subscribe_packet__(sock, true);
361 if (error) {
362 VLOG_WARN("could not subscribe packets (%s)",
363 ovs_strerror(error));
364 return error;
365 }
366 sock->read_ioctl = OVS_IOCTL_READ_PACKET;
367
368 return 0;
369 }
370
371 int
372 nl_sock_unsubscribe_packets(struct nl_sock *sock)
373 {
374 ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET);
375
376 int error = nl_sock_subscribe_packet__(sock, false);
377 if (error) {
378 VLOG_WARN("could not unsubscribe from packets (%s)",
379 ovs_strerror(error));
380 return error;
381 }
382
383 sock->read_ioctl = OVS_IOCTL_READ;
384 return 0;
385 }
386
387 int
388 nl_sock_subscribe_packet__(struct nl_sock *sock, bool subscribe)
389 {
390 struct ofpbuf request;
391 uint64_t request_stub[128];
392 struct ovs_header *ovs_header;
393 struct nlmsghdr *nlmsg;
394 int error;
395
396 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
397 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
398 OVS_CTRL_CMD_PACKET_SUBSCRIBE_REQ,
399 OVS_WIN_CONTROL_VERSION);
400
401 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
402 ovs_header->dp_ifindex = 0;
403 nl_msg_put_u8(&request, OVS_NL_ATTR_PACKET_SUBSCRIBE, subscribe ? 1 : 0);
404 nl_msg_put_u32(&request, OVS_NL_ATTR_PACKET_PID, sock->pid);
405
406 error = nl_sock_send(sock, &request, true);
407 ofpbuf_uninit(&request);
408 return error;
409 }
410 #endif
411
412 /* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
413 * successful, otherwise a positive errno value.
414 *
415 * Multicast group numbers are always positive.
416 *
417 * It is not an error to attempt to leave a multicast group to which a socket
418 * does not belong.
419 *
420 * On success, reading from 'sock' will still return any messages that were
421 * received on 'multicast_group' before the group was left. */
422 int
423 nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
424 {
425 #ifdef _WIN32
426 int error = nl_sock_mcgroup(sock, multicast_group, false);
427 if (error) {
428 VLOG_WARN("could not leave multicast group %u (%s)",
429 multicast_group, ovs_strerror(error));
430 return error;
431 }
432 sock->read_ioctl = OVS_IOCTL_READ;
433 #else
434 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
435 &multicast_group, sizeof multicast_group) < 0) {
436 VLOG_WARN("could not leave multicast group %u (%s)",
437 multicast_group, ovs_strerror(errno));
438 return errno;
439 }
440 #endif
441 return 0;
442 }
443
444 static int
445 nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
446 uint32_t nlmsg_seq, bool wait)
447 {
448 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
449 int error;
450
451 nlmsg->nlmsg_len = msg->size;
452 nlmsg->nlmsg_seq = nlmsg_seq;
453 nlmsg->nlmsg_pid = sock->pid;
454 do {
455 int retval;
456 #ifdef _WIN32
457 DWORD bytes;
458
459 if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
460 msg->data, msg->size, NULL, 0,
461 &bytes, NULL)) {
462 retval = -1;
463 /* XXX: Map to a more appropriate error based on GetLastError(). */
464 errno = EINVAL;
465 VLOG_DBG_RL(&rl, "fatal driver failure in write: %s",
466 ovs_lasterror_to_string());
467 } else {
468 retval = msg->size;
469 }
470 #else
471 retval = send(sock->fd, msg->data, msg->size,
472 wait ? 0 : MSG_DONTWAIT);
473 #endif
474 error = retval < 0 ? errno : 0;
475 } while (error == EINTR);
476 log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
477 if (!error) {
478 COVERAGE_INC(netlink_sent);
479 }
480 return error;
481 }
482
483 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
484 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
485 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
486 * sequence number, before the message is sent.
487 *
488 * Returns 0 if successful, otherwise a positive errno value. If
489 * 'wait' is true, then the send will wait until buffer space is ready;
490 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
491 int
492 nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
493 {
494 return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
495 }
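
/* Example (illustrative sketch, not part of the original file): composing a
 * Generic Netlink request and sending it.  'family', 'MY_CMD', and
 * 'MY_VERSION' are hypothetical placeholders; nlmsg_len, nlmsg_pid, and
 * nlmsg_seq are filled in by the send path as described above.
 *
 *     struct ofpbuf request;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, family, NLM_F_REQUEST,
 *                           MY_CMD, MY_VERSION);
 *     ...append attributes with nl_msg_put_u32(), nl_msg_put_string(), ...
 *     error = nl_sock_send(sock, &request, true);
 *     ofpbuf_uninit(&request);
 */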
496
497 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
498 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
499 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
500 * 'nlmsg_seq', before the message is sent.
501 *
502 * Returns 0 if successful, otherwise a positive errno value. If
503 * 'wait' is true, then the send will wait until buffer space is ready;
504 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
505 *
506 * This function is suitable for sending a reply to a request that was received
507 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
508 int
509 nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
510 uint32_t nlmsg_seq, bool wait)
511 {
512 return nl_sock_send__(sock, msg, nlmsg_seq, wait);
513 }
514
515 static int
516 nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
517 {
518 /* We can't accurately predict the size of the data to be received. The
519 * caller is supposed to have allocated enough space in 'buf' to handle the
520 * "typical" case. To handle exceptions, we make available enough space in
521 * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
522 * figure since that's the maximum length of a Netlink attribute). */
523 struct nlmsghdr *nlmsghdr;
524 uint8_t tail[65536];
525 struct iovec iov[2];
526 struct msghdr msg;
527 ssize_t retval;
528 int error;
529
530 ovs_assert(buf->allocated >= sizeof *nlmsghdr);
531 ofpbuf_clear(buf);
532
533 iov[0].iov_base = buf->base;
534 iov[0].iov_len = buf->allocated;
535 iov[1].iov_base = tail;
536 iov[1].iov_len = sizeof tail;
537
538 memset(&msg, 0, sizeof msg);
539 msg.msg_iov = iov;
540 msg.msg_iovlen = 2;
541
542 /* Receive a Netlink message from the kernel.
543 *
544 * This works around a kernel bug in which the kernel returns an error code
545 * as if it were the number of bytes read. It doesn't actually modify
546 * anything in the receive buffer in that case, so we can initialize the
547 * Netlink header with an impossible message length and then, upon success,
548 * check whether it changed. */
549 nlmsghdr = buf->base;
550 do {
551 nlmsghdr->nlmsg_len = UINT32_MAX;
552 #ifdef _WIN32
553 DWORD bytes;
554 if (!DeviceIoControl(sock->handle, sock->read_ioctl,
555 NULL, 0, tail, sizeof tail, &bytes, NULL)) {
556 VLOG_DBG_RL(&rl, "fatal driver failure in read: %s",
557 ovs_lasterror_to_string());
558 retval = -1;
559 /* XXX: Map to a more appropriate error. */
560 errno = EINVAL;
561 } else {
562 retval = bytes;
563 if (retval == 0) {
564 retval = -1;
565 errno = EAGAIN;
566 } else {
567 if (retval >= buf->allocated) {
568 ofpbuf_reinit(buf, retval);
569 nlmsghdr = buf->base;
570 nlmsghdr->nlmsg_len = UINT32_MAX;
571 }
572 memcpy(buf->data, tail, retval);
573 buf->size = retval;
574 }
575 }
576 #else
577 retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
578 #endif
579 error = (retval < 0 ? errno
580 : retval == 0 ? ECONNRESET /* not possible? */
581 : nlmsghdr->nlmsg_len != UINT32_MAX ? 0
582 : retval);
583 } while (error == EINTR);
584 if (error) {
585 if (error == ENOBUFS) {
586 /* Socket receive buffer overflow dropped one or more messages that
587 * the kernel tried to send to us. */
588 COVERAGE_INC(netlink_overflow);
589 }
590 return error;
591 }
592
593 if (msg.msg_flags & MSG_TRUNC) {
594 VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
595 sizeof tail);
596 return E2BIG;
597 }
598
599 if (retval < sizeof *nlmsghdr
600 || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
601 || nlmsghdr->nlmsg_len > retval) {
602 VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIuSIZE" bytes < %"PRIuSIZE")",
603 retval, sizeof *nlmsghdr);
604 return EPROTO;
605 }
606 #ifndef _WIN32
607 buf->size = MIN(retval, buf->allocated);
608 if (retval > buf->allocated) {
609 COVERAGE_INC(netlink_recv_jumbo);
610 ofpbuf_put(buf, tail, retval - buf->allocated);
611 }
612 #endif
613
614 log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
615 COVERAGE_INC(netlink_received);
616
617 return 0;
618 }
619
620 /* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
621 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
622 * EAGAIN if the 'sock' receive buffer is empty.
623 *
624 * The caller must have initialized 'buf' with an allocation of at least
625 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
626 * space for a "typical" message.
627 *
628 * On success, returns 0 and replaces 'buf''s previous content by the received
629 * message. This function expands 'buf''s allocated memory, as necessary, to
630 * hold the actual size of the received message.
631 *
632 * On failure, returns a positive errno value and clears 'buf' to zero length.
633 * 'buf' retains its previous memory allocation.
634 *
635 * Regardless of success or failure, this function resets 'buf''s headroom to
636 * 0. */
637 int
638 nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
639 {
640 return nl_sock_recv__(sock, buf, wait);
641 }
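
/* Example (illustrative sketch, not part of the original file): a
 * non-blocking receive loop.  EAGAIN means the receive queue is empty;
 * ENOBUFS means the kernel dropped one or more messages because the socket
 * buffer overflowed, which callers usually treat as a signal to
 * resynchronize their state.
 *
 *     uint64_t stub[4096 / 8];
 *     struct ofpbuf buf;
 *     int error;
 *
 *     ofpbuf_use_stub(&buf, stub, sizeof stub);
 *     do {
 *         error = nl_sock_recv(sock, &buf, false);
 *         if (!error) {
 *             ...process the message in 'buf'...
 *         }
 *     } while (!error);
 *     ...EAGAIN here just means the queue is drained; handle ENOBUFS...
 *     ofpbuf_uninit(&buf);
 */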
642
643 static void
644 nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
645 int error)
646 {
647 size_t i;
648
649 for (i = 0; i < n; i++) {
650 struct nl_transaction *txn = transactions[i];
651
652 txn->error = error;
653 if (txn->reply) {
654 ofpbuf_clear(txn->reply);
655 }
656 }
657 }
658
659 static int
660 nl_sock_transact_multiple__(struct nl_sock *sock,
661 struct nl_transaction **transactions, size_t n,
662 size_t *done)
663 {
664 uint64_t tmp_reply_stub[1024 / 8];
665 struct nl_transaction tmp_txn;
666 struct ofpbuf tmp_reply;
667
668 uint32_t base_seq;
669 struct iovec iovs[MAX_IOVS];
670 struct msghdr msg;
671 int error;
672 int i;
673
674 base_seq = nl_sock_allocate_seq(sock, n);
675 *done = 0;
676 for (i = 0; i < n; i++) {
677 struct nl_transaction *txn = transactions[i];
678 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);
679
680 nlmsg->nlmsg_len = txn->request->size;
681 nlmsg->nlmsg_seq = base_seq + i;
682 nlmsg->nlmsg_pid = sock->pid;
683
684 iovs[i].iov_base = txn->request->data;
685 iovs[i].iov_len = txn->request->size;
686 }
687
688 #ifndef _WIN32
689 memset(&msg, 0, sizeof msg);
690 msg.msg_iov = iovs;
691 msg.msg_iovlen = n;
692 do {
693 error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
694 } while (error == EINTR);
695
696 for (i = 0; i < n; i++) {
697 struct nl_transaction *txn = transactions[i];
698
699 log_nlmsg(__func__, error, txn->request->data,
700 txn->request->size, sock->protocol);
701 }
702 if (!error) {
703 COVERAGE_ADD(netlink_sent, n);
704 }
705
706 if (error) {
707 return error;
708 }
709
710 ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
711 tmp_txn.request = NULL;
712 tmp_txn.reply = &tmp_reply;
713 tmp_txn.error = 0;
714 while (n > 0) {
715 struct nl_transaction *buf_txn, *txn;
716 uint32_t seq;
717
718 /* Find a transaction whose buffer we can use for receiving a reply.
719 * If no such transaction is left, use tmp_txn. */
720 buf_txn = &tmp_txn;
721 for (i = 0; i < n; i++) {
722 if (transactions[i]->reply) {
723 buf_txn = transactions[i];
724 break;
725 }
726 }
727
728 /* Receive a reply. */
729 error = nl_sock_recv__(sock, buf_txn->reply, false);
730 if (error) {
731 if (error == EAGAIN) {
732 nl_sock_record_errors__(transactions, n, 0);
733 *done += n;
734 error = 0;
735 }
736 break;
737 }
738
739 /* Match the reply up with a transaction. */
740 seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
741 if (seq < base_seq || seq >= base_seq + n) {
742 VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
743 continue;
744 }
745 i = seq - base_seq;
746 txn = transactions[i];
747
748 /* Fill in the results for 'txn'. */
749 if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
750 if (txn->reply) {
751 ofpbuf_clear(txn->reply);
752 }
753 if (txn->error) {
754 VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
755 txn->error, ovs_strerror(txn->error));
756 }
757 } else {
758 txn->error = 0;
759 if (txn->reply && txn != buf_txn) {
760 /* Swap buffers. */
761 struct ofpbuf *reply = buf_txn->reply;
762 buf_txn->reply = txn->reply;
763 txn->reply = reply;
764 }
765 }
766
767 /* Fill in the results for transactions before 'txn'. (We have to do
768 * this after the results for 'txn' itself because of the buffer swap
769 * above.) */
770 nl_sock_record_errors__(transactions, i, 0);
771
772 /* Advance. */
773 *done += i + 1;
774 transactions += i + 1;
775 n -= i + 1;
776 base_seq += i + 1;
777 }
778 ofpbuf_uninit(&tmp_reply);
779 #else
780 error = 0;
781 uint8_t reply_buf[65536];
782 for (i = 0; i < n; i++) {
783 DWORD reply_len;
784 bool ret;
785 struct nl_transaction *txn = transactions[i];
786 struct nlmsghdr *request_nlmsg, *reply_nlmsg;
787
788 ret = DeviceIoControl(sock->handle, OVS_IOCTL_TRANSACT,
789 txn->request->data,
790 txn->request->size,
791 reply_buf, sizeof reply_buf,
792 &reply_len, NULL);
793
794 if (ret && reply_len == 0) {
795 /*
796 * The current transaction did not produce any data to read and that
797 * is not an error as such. Continue with the remainder of the
798 * transactions.
799 */
800 txn->error = 0;
801 if (txn->reply) {
802 ofpbuf_clear(txn->reply);
803 }
804 } else if (!ret) {
805 /* XXX: Map to a more appropriate error. */
806 error = EINVAL;
807 VLOG_DBG_RL(&rl, "fatal driver failure: %s",
808 ovs_lasterror_to_string());
809 break;
810 }
811
812 if (reply_len != 0) {
813 if (reply_len < sizeof *reply_nlmsg) {
814 nl_sock_record_errors__(transactions, n, 0);
815 VLOG_DBG_RL(&rl, "insufficient length of reply %"PRIu32
816 " for seq: %#"PRIx32, reply_len, nl_msg_nlmsghdr(txn->request)->nlmsg_seq);
817 break;
818 }
819
820 /* Validate the sequence number in the reply. */
821 request_nlmsg = nl_msg_nlmsghdr(txn->request);
822 reply_nlmsg = (struct nlmsghdr *)reply_buf;
823
824 if (request_nlmsg->nlmsg_seq != reply_nlmsg->nlmsg_seq) {
825 ovs_assert(request_nlmsg->nlmsg_seq == reply_nlmsg->nlmsg_seq);
826 VLOG_DBG_RL(&rl, "mismatched seq request %#"PRIx32
827 ", reply %#"PRIx32, request_nlmsg->nlmsg_seq,
828 reply_nlmsg->nlmsg_seq);
829 break;
830 }
831
832 /* Handle errors embedded within the netlink message. */
833 ofpbuf_use_stub(&tmp_reply, reply_buf, sizeof reply_buf);
834 tmp_reply.size = sizeof reply_buf;
835 if (nl_msg_nlmsgerr(&tmp_reply, &txn->error)) {
836 if (txn->reply) {
837 ofpbuf_clear(txn->reply);
838 }
839 if (txn->error) {
840 VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
841 txn->error, ovs_strerror(txn->error));
842 }
843 } else {
844 txn->error = 0;
845 if (txn->reply) {
846 /* Copy the reply to the buffer specified by the caller. */
847 if (reply_len > txn->reply->allocated) {
848 ofpbuf_reinit(txn->reply, reply_len);
849 }
850 memcpy(txn->reply->data, reply_buf, reply_len);
851 txn->reply->size = reply_len;
852 }
853 }
854 ofpbuf_uninit(&tmp_reply);
855 }
856
857 /* Count the number of successful transactions. */
858 (*done)++;
859
860 }
861
862 if (!error) {
863 COVERAGE_ADD(netlink_sent, n);
864 }
865 #endif
866
867 return error;
868 }
869
870 static void
871 nl_sock_transact_multiple(struct nl_sock *sock,
872 struct nl_transaction **transactions, size_t n)
873 {
874 int max_batch_count;
875 int error;
876
877 if (!n) {
878 return;
879 }
880
881 /* In theory, every request could have a 64 kB reply. But the default and
882 * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
883 * be a bit below 128 kB, so that would only allow a single message in a
884 * "batch". So we assume that replies average (at most) 4 kB, which allows
885 * a good deal of batching.
886 *
887 * In practice, most of the requests that we batch either have no reply at
888 * all or a brief reply. */
889 max_batch_count = MAX(sock->rcvbuf / 4096, 1);
890 max_batch_count = MIN(max_batch_count, max_iovs);
891
892 while (n > 0) {
893 size_t count, bytes;
894 size_t done;
895
896 /* Batch up to 'max_batch_count' transactions. But cap it at about a
897 * page of requests total because big skbuffs are expensive to
898 * allocate in the kernel. */
899 #if defined(PAGESIZE)
900 enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
901 #else
902 enum { MAX_BATCH_BYTES = 4096 - 512 };
903 #endif
904 bytes = transactions[0]->request->size;
905 for (count = 1; count < n && count < max_batch_count; count++) {
906 if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
907 break;
908 }
909 bytes += transactions[count]->request->size;
910 }
911
912 error = nl_sock_transact_multiple__(sock, transactions, count, &done);
913 transactions += done;
914 n -= done;
915
916 if (error == ENOBUFS) {
917 VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
918 } else if (error) {
919 VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
920 nl_sock_record_errors__(transactions, n, error);
921 if (error != EAGAIN) {
922 /* A fatal error has occurred. Abort the rest of
923 * transactions. */
924 break;
925 }
926 }
927 }
928 }
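
/* Worked example for the batching limits above (illustrative, not part of the
 * original file): if sock->rcvbuf ended up at the 1 MB requested in
 * nl_sock_create(), then max_batch_count = MAX(1048576 / 4096, 1) = 256,
 * which is then capped by max_iovs (at most MAX_IOVS, i.e. 128).
 * Independently of the message count, a batch is also cut off once the
 * combined request size would exceed MAX_BATCH_BYTES (roughly one page),
 * because large skbuffs are expensive for the kernel to allocate. */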
929
930 static int
931 nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
932 struct ofpbuf **replyp)
933 {
934 struct nl_transaction *transactionp;
935 struct nl_transaction transaction;
936
937 transaction.request = CONST_CAST(struct ofpbuf *, request);
938 transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
939 transactionp = &transaction;
940
941 nl_sock_transact_multiple(sock, &transactionp, 1);
942
943 if (replyp) {
944 if (transaction.error) {
945 ofpbuf_delete(transaction.reply);
946 *replyp = NULL;
947 } else {
948 *replyp = transaction.reply;
949 }
950 }
951
952 return transaction.error;
953 }
954
955 /* Drain all the messages currently in 'sock''s receive queue. */
956 int
957 nl_sock_drain(struct nl_sock *sock)
958 {
959 #ifdef _WIN32
960 return 0;
961 #else
962 return drain_rcvbuf(sock->fd);
963 #endif
964 }
965
966 /* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
967 * Netlink socket created with the given 'protocol', and initializes 'dump' to
968 * reflect the state of the operation.
969 *
970 * 'request' must contain a Netlink message. Before sending the message,
971 * nlmsg_len will be finalized to match request->size, and nlmsg_pid will be
972 * set to the Netlink socket's pid. NLM_F_DUMP and NLM_F_ACK will be set in
973 * nlmsg_flags.
974 *
975 * The design of this Netlink socket library ensures that the dump is reliable.
976 *
977 * This function provides no status indication. nl_dump_done() provides an
978 * error status for the entire dump operation.
979 *
980 * The caller must eventually destroy 'request'.
981 */
982 void
983 nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
984 {
985 nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
986
987 ovs_mutex_init(&dump->mutex);
988 ovs_mutex_lock(&dump->mutex);
989 dump->status = nl_pool_alloc(protocol, &dump->sock);
990 if (!dump->status) {
991 dump->status = nl_sock_send__(dump->sock, request,
992 nl_sock_allocate_seq(dump->sock, 1),
993 true);
994 }
995 dump->nl_seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
996 ovs_mutex_unlock(&dump->mutex);
997 }
998
999 static int
1000 nl_dump_refill(struct nl_dump *dump, struct ofpbuf *buffer)
1001 OVS_REQUIRES(dump->mutex)
1002 {
1003 struct nlmsghdr *nlmsghdr;
1004 int error;
1005
1006 while (!buffer->size) {
1007 error = nl_sock_recv__(dump->sock, buffer, false);
1008 if (error) {
1009 /* The kernel never blocks providing the results of a dump, so
1010 * error == EAGAIN means that we've read the whole thing, and
1011 * therefore transform it into EOF. (The kernel always provides
1012 * NLMSG_DONE as a sentinel. Some other thread must have received
1013 * that already but not yet signaled it in 'status'.)
1014 *
1015 * Any other error is just an error. */
1016 return error == EAGAIN ? EOF : error;
1017 }
1018
1019 nlmsghdr = nl_msg_nlmsghdr(buffer);
1020 if (dump->nl_seq != nlmsghdr->nlmsg_seq) {
1021 VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
1022 nlmsghdr->nlmsg_seq, dump->nl_seq);
1023 ofpbuf_clear(buffer);
1024 }
1025 }
1026
1027 if (nl_msg_nlmsgerr(buffer, &error) && error) {
1028 VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
1029 ovs_strerror(error));
1030 ofpbuf_clear(buffer);
1031 return error;
1032 }
1033
1034 return 0;
1035 }
1036
1037 static int
1038 nl_dump_next__(struct ofpbuf *reply, struct ofpbuf *buffer)
1039 {
1040 struct nlmsghdr *nlmsghdr = nl_msg_next(buffer, reply);
1041 if (!nlmsghdr) {
1042 VLOG_WARN_RL(&rl, "netlink dump contains message fragment");
1043 return EPROTO;
1044 } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
1045 return EOF;
1046 } else {
1047 return 0;
1048 }
1049 }
1050
1051 /* Attempts to retrieve another reply from 'dump' into 'buffer'. 'dump' must
1052 * have been initialized with nl_dump_start(), and 'buffer' must have been
1053 * initialized. 'buffer' should be at least NL_DUMP_BUFSIZE bytes long.
1054 *
1055 * If successful, returns true and points 'reply->data' and
1056 * 'reply->size' to the message that was retrieved. The caller must not
1057 * modify 'reply' (because it points within 'buffer', which will be used by
1058 * future calls to this function).
1059 *
1060 * On failure, returns false and sets 'reply->data' to NULL and
1061 * 'reply->size' to 0. Failure might indicate an actual error or merely
1062 * the end of replies. An error status for the entire dump operation is
1063 * provided when it is completed by calling nl_dump_done().
1064 *
1065 * Multiple threads may call this function, passing the same nl_dump, however
1066 * each must provide independent buffers. This function may cache multiple
1067 * replies in the buffer, and these will be processed before more replies are
1068 * fetched. When this function returns false, other threads may continue to
1069 * process replies in their buffers, but they will not fetch more replies.
1070 */
1071 bool
1072 nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply, struct ofpbuf *buffer)
1073 {
1074 int retval = 0;
1075
1076 /* If the buffer is empty, refill it.
1077 *
1078 * If the buffer is not empty, we don't check the dump's status.
1079 * Otherwise, we could end up skipping some of the dump results if thread A
1080 * hits EOF while thread B is in the midst of processing a batch. */
1081 if (!buffer->size) {
1082 ovs_mutex_lock(&dump->mutex);
1083 if (!dump->status) {
1084 /* Take the mutex here to avoid an in-kernel race. If two threads
1085 * try to read from a Netlink dump socket at once, then the socket
1086 * error can be set to EINVAL, which will be encountered on the
1087 * next recv on that socket, which could be anywhere due to the way
1088 * that we pool Netlink sockets. Serializing the recv calls avoids
1089 * the issue. */
1090 dump->status = nl_dump_refill(dump, buffer);
1091 }
1092 retval = dump->status;
1093 ovs_mutex_unlock(&dump->mutex);
1094 }
1095
1096 /* Fetch the next message from the buffer. */
1097 if (!retval) {
1098 retval = nl_dump_next__(reply, buffer);
1099 if (retval) {
1100 /* Record 'retval' as the dump status, but don't overwrite an error
1101 * with EOF. */
1102 ovs_mutex_lock(&dump->mutex);
1103 if (dump->status <= 0) {
1104 dump->status = retval;
1105 }
1106 ovs_mutex_unlock(&dump->mutex);
1107 }
1108 }
1109
1110 if (retval) {
1111 reply->data = NULL;
1112 reply->size = 0;
1113 }
1114 return !retval;
1115 }
1116
1117 /* Completes Netlink dump operation 'dump', which must have been initialized
1118 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
1119 * otherwise a positive errno value describing the problem. */
1120 int
1121 nl_dump_done(struct nl_dump *dump)
1122 {
1123 int status;
1124
1125 ovs_mutex_lock(&dump->mutex);
1126 status = dump->status;
1127 ovs_mutex_unlock(&dump->mutex);
1128
1129 /* Drain any remaining messages that the client didn't read. Otherwise the
1130 * kernel will continue to queue them up and waste buffer space.
1131 *
1132 * XXX We could just destroy and discard the socket in this case. */
1133 if (!status) {
1134 uint64_t tmp_reply_stub[NL_DUMP_BUFSIZE / 8];
1135 struct ofpbuf reply, buf;
1136
1137 ofpbuf_use_stub(&buf, tmp_reply_stub, sizeof tmp_reply_stub);
1138 while (nl_dump_next(dump, &reply, &buf)) {
1139 /* Nothing to do. */
1140 }
1141 ofpbuf_uninit(&buf);
1142
1143 ovs_mutex_lock(&dump->mutex);
1144 status = dump->status;
1145 ovs_mutex_unlock(&dump->mutex);
1146 ovs_assert(status);
1147 }
1148
1149 nl_pool_release(dump->sock);
1150 ovs_mutex_destroy(&dump->mutex);
1151
1152 return status == EOF ? 0 : status;
1153 }
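
/* Example (illustrative sketch, not part of the original file): the typical
 * start/next/done pattern.  'request' is a caller-built Netlink dump request;
 * NL_DUMP_BUFSIZE comes from netlink-socket.h.
 *
 *     uint64_t stub[NL_DUMP_BUFSIZE / 8];
 *     struct ofpbuf buf, reply;
 *     struct nl_dump dump;
 *     int error;
 *
 *     nl_dump_start(&dump, NETLINK_GENERIC, &request);
 *     ofpbuf_uninit(&request);
 *
 *     ofpbuf_use_stub(&buf, stub, sizeof stub);
 *     while (nl_dump_next(&dump, &reply, &buf)) {
 *         ...parse one reply message from 'reply'...
 *     }
 *     ofpbuf_uninit(&buf);
 *     error = nl_dump_done(&dump);
 */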
1154
1155 #ifdef _WIN32
1156 /* Pend an I/O request in the driver. The driver completes the I/O whenever
1157 * an event or a packet is ready to be read. Once the I/O is completed
1158 * the overlapped structure event associated with the pending I/O will be set
1159 */
1160 static int
1161 pend_io_request(struct nl_sock *sock)
1162 {
1163 struct ofpbuf request;
1164 uint64_t request_stub[128];
1165 struct ovs_header *ovs_header;
1166 struct nlmsghdr *nlmsg;
1167 uint32_t seq;
1168 int retval = 0;
1169 int error;
1170 DWORD bytes;
1171 OVERLAPPED *overlapped = CONST_CAST(OVERLAPPED *, &sock->overlapped);
1172 uint16_t cmd = OVS_CTRL_CMD_WIN_PEND_PACKET_REQ;
1173
1174 ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET ||
1175 sock->read_ioctl == OVS_IOCTL_READ_EVENT);
1176 if (sock->read_ioctl == OVS_IOCTL_READ_EVENT) {
1177 cmd = OVS_CTRL_CMD_WIN_PEND_REQ;
1178 }
1179
1180 int ovs_msg_size = sizeof (struct nlmsghdr) + sizeof (struct genlmsghdr) +
1181 sizeof (struct ovs_header);
1182
1183 ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
1184
1185 seq = nl_sock_allocate_seq(sock, 1);
1186 nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
1187 cmd, OVS_WIN_CONTROL_VERSION);
1188 nlmsg = nl_msg_nlmsghdr(&request);
1189 nlmsg->nlmsg_seq = seq;
1190 nlmsg->nlmsg_pid = sock->pid;
1191
1192 ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
1193 ovs_header->dp_ifindex = 0;
1194
1195 if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
1196 request.data, request.size,
1197 NULL, 0, &bytes, overlapped)) {
1198 error = GetLastError();
1199 /* Check if the I/O got pended */
1200 if (error != ERROR_IO_INCOMPLETE && error != ERROR_IO_PENDING) {
1201 VLOG_ERR("nl_sock_wait failed - %s", ovs_format_message(error));
1202 retval = EINVAL;
1203 }
1204 } else {
1205 retval = EAGAIN;
1206 }
1207
1208 done:
1209 ofpbuf_uninit(&request);
1210 return retval;
1211 }
1212 #endif /* _WIN32 */
1213
1214 /* Causes poll_block() to wake up when any of the specified 'events' (which is
1215 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'.
1216 * On Windows, 'sock' is not treated as const, and may be modified. */
1217 void
1218 nl_sock_wait(const struct nl_sock *sock, short int events)
1219 {
1220 #ifdef _WIN32
1221 if (sock->overlapped.Internal != STATUS_PENDING) {
1222 int ret = pend_io_request(CONST_CAST(struct nl_sock *, sock));
1223 if (ret == 0) {
1224 poll_wevent_wait(sock->overlapped.hEvent);
1225 } else {
1226 poll_immediate_wake();
1227 }
1228 } else {
1229 poll_wevent_wait(sock->overlapped.hEvent);
1230 }
1231 #else
1232 poll_fd_wait(sock->fd, events);
1233 #endif
1234 }
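
/* Example (illustrative sketch, not part of the original file): integrating a
 * notification socket into the poll loop.  The caller drains the socket
 * without blocking, then registers interest and blocks in poll_block()
 * (declared in poll-loop.h) until more data is ready.
 *
 *     while (!nl_sock_recv(sock, &buf, false)) {
 *         ...process the notification in 'buf'...
 *     }
 *     nl_sock_wait(sock, POLLIN);
 *     poll_block();
 */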
1235
1236 #ifndef _WIN32
1237 /* Returns the underlying fd for 'sock', for use in "poll()"-like operations
1238 * that can't use nl_sock_wait().
1239 *
1240 * It's a little tricky to use the returned fd correctly, because nl_sock does
1241 * "copy on write" to allow a single nl_sock to be used for notifications,
1242 * transactions, and dumps. If 'sock' is used only for notifications and
1243 * transactions (and never for dump) then the usage is safe. */
1244 int
1245 nl_sock_fd(const struct nl_sock *sock)
1246 {
1247 return sock->fd;
1248 }
1249 #endif
1250
1251 /* Returns the PID associated with this socket. */
1252 uint32_t
1253 nl_sock_pid(const struct nl_sock *sock)
1254 {
1255 return sock->pid;
1256 }
1257 \f
1258 /* Miscellaneous. */
1259
1260 struct genl_family {
1261 struct hmap_node hmap_node;
1262 uint16_t id;
1263 char *name;
1264 };
1265
1266 static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);
1267
1268 static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
1269 [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
1270 [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
1271 };
1272
1273 static struct genl_family *
1274 find_genl_family_by_id(uint16_t id)
1275 {
1276 struct genl_family *family;
1277
1278 HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
1279 &genl_families) {
1280 if (family->id == id) {
1281 return family;
1282 }
1283 }
1284 return NULL;
1285 }
1286
1287 static void
1288 define_genl_family(uint16_t id, const char *name)
1289 {
1290 struct genl_family *family = find_genl_family_by_id(id);
1291
1292 if (family) {
1293 if (!strcmp(family->name, name)) {
1294 return;
1295 }
1296 free(family->name);
1297 } else {
1298 family = xmalloc(sizeof *family);
1299 family->id = id;
1300 hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
1301 }
1302 family->name = xstrdup(name);
1303 }
1304
1305 static const char *
1306 genl_family_to_name(uint16_t id)
1307 {
1308 if (id == GENL_ID_CTRL) {
1309 return "control";
1310 } else {
1311 struct genl_family *family = find_genl_family_by_id(id);
1312 return family ? family->name : "unknown";
1313 }
1314 }
1315
1316 #ifndef _WIN32
1317 static int
1318 do_lookup_genl_family(const char *name, struct nlattr **attrs,
1319 struct ofpbuf **replyp)
1320 {
1321 struct nl_sock *sock;
1322 struct ofpbuf request, *reply;
1323 int error;
1324
1325 *replyp = NULL;
1326 error = nl_sock_create(NETLINK_GENERIC, &sock);
1327 if (error) {
1328 return error;
1329 }
1330
1331 ofpbuf_init(&request, 0);
1332 nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
1333 CTRL_CMD_GETFAMILY, 1);
1334 nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
1335 error = nl_sock_transact(sock, &request, &reply);
1336 ofpbuf_uninit(&request);
1337 if (error) {
1338 nl_sock_destroy(sock);
1339 return error;
1340 }
1341
1342 if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
1343 family_policy, attrs, ARRAY_SIZE(family_policy))
1344 || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
1345 nl_sock_destroy(sock);
1346 ofpbuf_delete(reply);
1347 return EPROTO;
1348 }
1349
1350 nl_sock_destroy(sock);
1351 *replyp = reply;
1352 return 0;
1353 }
1354 #else
1355 static int
1356 do_lookup_genl_family(const char *name, struct nlattr **attrs,
1357 struct ofpbuf **replyp)
1358 {
1359 struct nlmsghdr *nlmsg;
1360 struct ofpbuf *reply;
1361 int error;
1362 uint16_t family_id;
1363 const char *family_name;
1364 uint32_t family_version;
1365 uint32_t family_attrmax;
1366 uint32_t mcgrp_id = OVS_WIN_NL_INVALID_MCGRP_ID;
1367 const char *mcgrp_name = NULL;
1368
1369 *replyp = NULL;
1370 reply = ofpbuf_new(1024);
1371
1372 /* CTRL_ATTR_MCAST_GROUPS is supported only for VPORT family. */
1373 if (!strcmp(name, OVS_WIN_CONTROL_FAMILY)) {
1374 family_id = OVS_WIN_NL_CTRL_FAMILY_ID;
1375 family_name = OVS_WIN_CONTROL_FAMILY;
1376 family_version = OVS_WIN_CONTROL_VERSION;
1377 family_attrmax = OVS_WIN_CONTROL_ATTR_MAX;
1378 } else if (!strcmp(name, OVS_DATAPATH_FAMILY)) {
1379 family_id = OVS_WIN_NL_DATAPATH_FAMILY_ID;
1380 family_name = OVS_DATAPATH_FAMILY;
1381 family_version = OVS_DATAPATH_VERSION;
1382 family_attrmax = OVS_DP_ATTR_MAX;
1383 } else if (!strcmp(name, OVS_PACKET_FAMILY)) {
1384 family_id = OVS_WIN_NL_PACKET_FAMILY_ID;
1385 family_name = OVS_PACKET_FAMILY;
1386 family_version = OVS_PACKET_VERSION;
1387 family_attrmax = OVS_PACKET_ATTR_MAX;
1388 } else if (!strcmp(name, OVS_VPORT_FAMILY)) {
1389 family_id = OVS_WIN_NL_VPORT_FAMILY_ID;
1390 family_name = OVS_VPORT_FAMILY;
1391 family_version = OVS_VPORT_VERSION;
1392 family_attrmax = OVS_VPORT_ATTR_MAX;
1393 mcgrp_id = OVS_WIN_NL_VPORT_MCGRP_ID;
1394 mcgrp_name = OVS_VPORT_MCGROUP;
1395 } else if (!strcmp(name, OVS_FLOW_FAMILY)) {
1396 family_id = OVS_WIN_NL_FLOW_FAMILY_ID;
1397 family_name = OVS_FLOW_FAMILY;
1398 family_version = OVS_FLOW_VERSION;
1399 family_attrmax = OVS_FLOW_ATTR_MAX;
1400 } else if (!strcmp(name, OVS_WIN_NETDEV_FAMILY)) {
1401 family_id = OVS_WIN_NL_NETDEV_FAMILY_ID;
1402 family_name = OVS_WIN_NETDEV_FAMILY;
1403 family_version = OVS_WIN_NETDEV_VERSION;
1404 family_attrmax = OVS_WIN_NETDEV_ATTR_MAX;
1405 } else {
1406 ofpbuf_delete(reply);
1407 return EINVAL;
1408 }
1409
1410 nl_msg_put_genlmsghdr(reply, 0, GENL_ID_CTRL, 0,
1411 CTRL_CMD_NEWFAMILY, family_version);
1412 /* CTRL_ATTR_HDRSIZE and CTRL_ATTR_OPS are not populated, but the
1413 * callers do not seem to need them. */
1414 nl_msg_put_u16(reply, CTRL_ATTR_FAMILY_ID, family_id);
1415 nl_msg_put_string(reply, CTRL_ATTR_FAMILY_NAME, family_name);
1416 nl_msg_put_u32(reply, CTRL_ATTR_VERSION, family_version);
1417 nl_msg_put_u32(reply, CTRL_ATTR_MAXATTR, family_attrmax);
1418
1419 if (mcgrp_id != OVS_WIN_NL_INVALID_MCGRP_ID) {
1420 size_t mcgrp_ofs1 = nl_msg_start_nested(reply, CTRL_ATTR_MCAST_GROUPS);
1421 size_t mcgrp_ofs2 = nl_msg_start_nested(reply,
1422 OVS_WIN_NL_VPORT_MCGRP_ID - OVS_WIN_NL_MCGRP_START_ID);
1423 nl_msg_put_u32(reply, CTRL_ATTR_MCAST_GRP_ID, mcgrp_id);
1424 ovs_assert(mcgrp_name != NULL);
1425 nl_msg_put_string(reply, CTRL_ATTR_MCAST_GRP_NAME, mcgrp_name);
1426 nl_msg_end_nested(reply, mcgrp_ofs2);
1427 nl_msg_end_nested(reply, mcgrp_ofs1);
1428 }
1429
1430 /* Set the total length of the netlink message. */
1431 nlmsg = nl_msg_nlmsghdr(reply);
1432 nlmsg->nlmsg_len = reply->size;
1433
1434 if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
1435 family_policy, attrs, ARRAY_SIZE(family_policy))
1436 || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
1437 ofpbuf_delete(reply);
1438 return EPROTO;
1439 }
1440
1441 *replyp = reply;
1442 return 0;
1443 }
1444 #endif
1445
1446 /* Finds the multicast group called 'group_name' in genl family 'family_name'.
1447 * When successful, writes its result to 'multicast_group' and returns 0.
1448 * Otherwise, clears 'multicast_group' and returns a positive error code.
1449 */
1450 int
1451 nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
1452 unsigned int *multicast_group)
1453 {
1454 struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
1455 const struct nlattr *mc;
1456 struct ofpbuf *reply;
1457 unsigned int left;
1458 int error;
1459
1460 *multicast_group = 0;
1461 error = do_lookup_genl_family(family_name, family_attrs, &reply);
1462 if (error) {
1463 return error;
1464 }
1465
1466 if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
1467 error = EPROTO;
1468 goto exit;
1469 }
1470
1471 NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
1472 static const struct nl_policy mc_policy[] = {
1473 [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
1474 [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
1475 };
1476
1477 struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
1478 const char *mc_name;
1479
1480 if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
1481 error = EPROTO;
1482 goto exit;
1483 }
1484
1485 mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
1486 if (!strcmp(group_name, mc_name)) {
1487 *multicast_group =
1488 nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
1489 error = 0;
1490 goto exit;
1491 }
1492 }
1493 error = EPROTO;
1494
1495 exit:
1496 ofpbuf_delete(reply);
1497 return error;
1498 }
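
/* Example (illustrative sketch, not part of the original file): resolving a
 * Generic Netlink multicast group by name and subscribing a socket to it,
 * using the OVS vport family and group names that appear elsewhere in this
 * file.
 *
 *     unsigned int group;
 *     int error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
 *                                        &group);
 *     if (!error) {
 *         error = nl_sock_join_mcgroup(sock, group);
 *     }
 */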
1499
1500 /* If '*number' is 0, translates the given Generic Netlink family 'name' to a
1501 * number and stores it in '*number'. If successful, returns 0 and the caller
1502 * may use '*number' as the family number. On failure, returns a positive
1503 * errno value and '*number' caches the errno value. */
1504 int
1505 nl_lookup_genl_family(const char *name, int *number)
1506 {
1507 if (*number == 0) {
1508 struct nlattr *attrs[ARRAY_SIZE(family_policy)];
1509 struct ofpbuf *reply;
1510 int error;
1511
1512 error = do_lookup_genl_family(name, attrs, &reply);
1513 if (!error) {
1514 *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
1515 define_genl_family(*number, name);
1516 } else {
1517 *number = -error;
1518 }
1519 ofpbuf_delete(reply);
1520
1521 ovs_assert(*number != 0);
1522 }
1523 return *number > 0 ? 0 : -*number;
1524 }
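
/* Example (illustrative sketch, not part of the original file): the intended
 * caching pattern.  'ovs_datapath_family' is a hypothetical static variable
 * that keeps the resolved family number across calls; OVS_DATAPATH_FAMILY is
 * the family name used elsewhere in this file.
 *
 *     static int ovs_datapath_family;
 *
 *     int error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
 *                                       &ovs_datapath_family);
 *     if (!error) {
 *         ...use 'ovs_datapath_family' as the nlmsg_type of requests...
 *     }
 */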
1525 \f
1526 struct nl_pool {
1527 struct nl_sock *socks[16];
1528 int n;
1529 };
1530
1531 static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
1532 static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
1533
1534 static int
1535 nl_pool_alloc(int protocol, struct nl_sock **sockp)
1536 {
1537 struct nl_sock *sock = NULL;
1538 struct nl_pool *pool;
1539
1540 ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
1541
1542 ovs_mutex_lock(&pool_mutex);
1543 pool = &pools[protocol];
1544 if (pool->n > 0) {
1545 sock = pool->socks[--pool->n];
1546 }
1547 ovs_mutex_unlock(&pool_mutex);
1548
1549 if (sock) {
1550 *sockp = sock;
1551 return 0;
1552 } else {
1553 return nl_sock_create(protocol, sockp);
1554 }
1555 }
1556
1557 static void
1558 nl_pool_release(struct nl_sock *sock)
1559 {
1560 if (sock) {
1561 struct nl_pool *pool = &pools[sock->protocol];
1562
1563 ovs_mutex_lock(&pool_mutex);
1564 if (pool->n < ARRAY_SIZE(pool->socks)) {
1565 pool->socks[pool->n++] = sock;
1566 sock = NULL;
1567 }
1568 ovs_mutex_unlock(&pool_mutex);
1569
1570 nl_sock_destroy(sock);
1571 }
1572 }
1573
1574 /* Sends 'request' to the kernel on a Netlink socket for the given 'protocol'
1575 * (e.g. NETLINK_ROUTE or NETLINK_GENERIC) and waits for a response. If
1576 * successful, returns 0. On failure, returns a positive errno value.
1577 *
1578 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
1579 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
1580 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
1581 * reply, if any, is discarded.
1582 *
1583 * Before the message is sent, nlmsg_len in 'request' will be finalized to
1584 * match msg->size, nlmsg_pid will be set to the pid of the socket used
1585 * for sending the request, and nlmsg_seq will be initialized.
1586 *
1587 * The caller is responsible for destroying 'request'.
1588 *
1589 * Bare Netlink is an unreliable transport protocol. This function layers
1590 * reliable delivery and reply semantics on top of bare Netlink.
1591 *
1592 * In Netlink, sending a request to the kernel is reliable enough, because the
1593 * kernel will tell us if the message cannot be queued (and we will in that
1594 * case put it on the transmit queue and wait until it can be delivered).
1595 *
1596 * Receiving the reply is the real problem: if the socket buffer is full when
1597 * the kernel tries to send the reply, the reply will be dropped. However, the
1598 * kernel sets a flag that a reply has been dropped. The next call to recv
1599 * then returns ENOBUFS. We can then re-send the request.
1600 *
1601 * Caveats:
1602 *
1603 * 1. Netlink depends on sequence numbers to match up requests and
1604 * replies. The sender of a request supplies a sequence number, and
1605 * the reply echoes back that sequence number.
1606 *
1607 * This is fine, but (1) some kernel netlink implementations are
1608 * broken, in that they fail to echo sequence numbers and (2) this
1609 * function will drop packets with non-matching sequence numbers, so
1610 * that only a single request can be usefully transacted at a time.
1611 *
1612 * 2. Resending the request causes it to be re-executed, so the request
1613 * needs to be idempotent.
1614 */
1615 int
1616 nl_transact(int protocol, const struct ofpbuf *request,
1617 struct ofpbuf **replyp)
1618 {
1619 struct nl_sock *sock;
1620 int error;
1621
1622 error = nl_pool_alloc(protocol, &sock);
1623 if (error) {
1624 *replyp = NULL;
1625 return error;
1626 }
1627
1628 error = nl_sock_transact(sock, request, replyp);
1629
1630 nl_pool_release(sock);
1631 return error;
1632 }
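
/* Example (illustrative sketch, not part of the original file): a single
 * request/reply exchange over the pooled sockets.  The request mirrors the
 * family lookup built in do_lookup_genl_family() above.
 *
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, OVS_DATAPATH_FAMILY);
 *     error = nl_transact(NETLINK_GENERIC, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         ...parse 'reply' and then ofpbuf_delete(reply)...
 *     }
 */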
1633
1634 /* Sends the 'request' member of the 'n' transactions in 'transactions' on a
1635 * Netlink socket for the given 'protocol' (e.g. NETLINK_ROUTE or
1636 * NETLINK_GENERIC), in order, and receives responses to all of them. Fills in
1637 * the 'error' member of each transaction with 0 if it was successful,
1638 * otherwise with a positive errno value. If 'reply' is nonnull, then it will
1639 * be filled with the reply if the message receives a detailed reply. In other
1640 * cases, i.e. where the request failed or had no reply beyond an indication of
1641 * success, 'reply' will be cleared if it is nonnull.
1642 *
1643 * The caller is responsible for destroying each request and reply, and the
1644 * transactions array itself.
1645 *
1646 * Before sending each message, this function will finalize nlmsg_len in each
1647 * 'request' to match the ofpbuf's size, set nlmsg_pid to the pid of the socket
1648 * used for the transaction, and initialize nlmsg_seq.
1649 *
1650 * Bare Netlink is an unreliable transport protocol. This function layers
1651 * reliable delivery and reply semantics on top of bare Netlink. See
1652 * nl_transact() for some caveats.
1653 */
1654 void
1655 nl_transact_multiple(int protocol,
1656 struct nl_transaction **transactions, size_t n)
1657 {
1658 struct nl_sock *sock;
1659 int error;
1660
1661 error = nl_pool_alloc(protocol, &sock);
1662 if (!error) {
1663 nl_sock_transact_multiple(sock, transactions, n);
1664 nl_pool_release(sock);
1665 } else {
1666 nl_sock_record_errors__(transactions, n, error);
1667 }
1668 }
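
/* Example (illustrative sketch, not part of the original file): issuing two
 * requests as a single batch.  Each struct nl_transaction (declared in
 * netlink-socket.h) pairs a caller-built request with an optional reply
 * buffer, and gets its own error code; a NULL 'reply' discards any detailed
 * reply.
 *
 *     struct nl_transaction txns[2];
 *     struct nl_transaction *txnsp[2] = { &txns[0], &txns[1] };
 *
 *     txns[0].request = &request0;
 *     txns[0].reply = ofpbuf_new(1024);
 *     txns[1].request = &request1;
 *     txns[1].reply = NULL;
 *     nl_transact_multiple(NETLINK_GENERIC, txnsp, 2);
 *     ...check txns[0].error / txns[1].error and txns[0].reply...
 */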
1669
1670 \f
1671 static uint32_t
1672 nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
1673 {
1674 uint32_t seq = sock->next_seq;
1675
1676 sock->next_seq += n;
1677
1678 /* Make it impossible for the next request for sequence numbers to wrap
1679 * around to 0. Start over with 1 to avoid ever using a sequence number of
1680 * 0, because the kernel uses sequence number 0 for notifications. */
1681 if (sock->next_seq >= UINT32_MAX / 2) {
1682 sock->next_seq = 1;
1683 }
1684
1685 return seq;
1686 }
1687
1688 static void
1689 nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
1690 {
1691 struct nlmsg_flag {
1692 unsigned int bits;
1693 const char *name;
1694 };
1695 static const struct nlmsg_flag flags[] = {
1696 { NLM_F_REQUEST, "REQUEST" },
1697 { NLM_F_MULTI, "MULTI" },
1698 { NLM_F_ACK, "ACK" },
1699 { NLM_F_ECHO, "ECHO" },
1700 { NLM_F_DUMP, "DUMP" },
1701 { NLM_F_ROOT, "ROOT" },
1702 { NLM_F_MATCH, "MATCH" },
1703 { NLM_F_ATOMIC, "ATOMIC" },
1704 };
1705 const struct nlmsg_flag *flag;
1706 uint16_t flags_left;
1707
1708 ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
1709 h->nlmsg_len, h->nlmsg_type);
1710 if (h->nlmsg_type == NLMSG_NOOP) {
1711 ds_put_cstr(ds, "(no-op)");
1712 } else if (h->nlmsg_type == NLMSG_ERROR) {
1713 ds_put_cstr(ds, "(error)");
1714 } else if (h->nlmsg_type == NLMSG_DONE) {
1715 ds_put_cstr(ds, "(done)");
1716 } else if (h->nlmsg_type == NLMSG_OVERRUN) {
1717 ds_put_cstr(ds, "(overrun)");
1718 } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
1719 ds_put_cstr(ds, "(reserved)");
1720 } else if (protocol == NETLINK_GENERIC) {
1721 ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
1722 } else {
1723 ds_put_cstr(ds, "(family-defined)");
1724 }
1725 ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
1726 flags_left = h->nlmsg_flags;
1727 for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
1728 if ((flags_left & flag->bits) == flag->bits) {
1729 ds_put_format(ds, "[%s]", flag->name);
1730 flags_left &= ~flag->bits;
1731 }
1732 }
1733 if (flags_left) {
1734 ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
1735 }
1736 ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
1737 h->nlmsg_seq, h->nlmsg_pid);
1738 }
1739
1740 static char *
1741 nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
1742 {
1743 struct ds ds = DS_EMPTY_INITIALIZER;
1744 const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
1745 if (h) {
1746 nlmsghdr_to_string(h, protocol, &ds);
1747 if (h->nlmsg_type == NLMSG_ERROR) {
1748 const struct nlmsgerr *e;
1749 e = ofpbuf_at(buffer, NLMSG_HDRLEN,
1750 NLMSG_ALIGN(sizeof(struct nlmsgerr)));
1751 if (e) {
1752 ds_put_format(&ds, " error(%d", e->error);
1753 if (e->error < 0) {
1754 ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
1755 }
1756 ds_put_cstr(&ds, ", in-reply-to(");
1757 nlmsghdr_to_string(&e->msg, protocol, &ds);
1758 ds_put_cstr(&ds, "))");
1759 } else {
1760 ds_put_cstr(&ds, " error(truncated)");
1761 }
1762 } else if (h->nlmsg_type == NLMSG_DONE) {
1763 int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
1764 if (error) {
1765 ds_put_format(&ds, " done(%d", *error);
1766 if (*error < 0) {
1767 ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
1768 }
1769 ds_put_cstr(&ds, ")");
1770 } else {
1771 ds_put_cstr(&ds, " done(truncated)");
1772 }
1773 } else if (protocol == NETLINK_GENERIC) {
1774 struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
1775 if (genl) {
1776 ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
1777 genl->cmd, genl->version);
1778 }
1779 }
1780 } else {
1781 ds_put_cstr(&ds, "nl(truncated)");
1782 }
1783 return ds.string;
1784 }
1785
1786 static void
1787 log_nlmsg(const char *function, int error,
1788 const void *message, size_t size, int protocol)
1789 {
1790 if (!VLOG_IS_DBG_ENABLED()) {
1791 return;
1792 }
1793
1794 struct ofpbuf buffer = ofpbuf_const_initializer(message, size);
1795 char *nlmsg = nlmsg_to_string(&buffer, protocol);
1796 VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
1797 free(nlmsg);
1798 }