/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netlink-socket.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include "coverage.h"
#include "dynamic-string.h"
#include "hash.h"
#include "hmap.h"
#include "netlink.h"
#include "netlink-protocol.h"
#include "odp-netlink.h"
#include "ofpbuf.h"
#include "ovs-thread.h"
#include "poll-loop.h"
#include "seq.h"
#include "socket-util.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_sent);

/* Linux header file confusion causes this to be undefined. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

#ifdef _WIN32
static struct ovs_mutex portid_mutex = OVS_MUTEX_INITIALIZER;
static uint32_t g_last_portid = 0;

/* Port IDs must be unique! */
static uint32_t
portid_next(void)
    OVS_GUARDED_BY(portid_mutex)
{
    g_last_portid++;
    return g_last_portid;
}
#endif /* _WIN32 */

/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information.  Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);

static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);
#ifdef _WIN32
static int get_sock_pid_from_kernel(struct nl_sock *sock);
#endif
\f
/* Netlink sockets. */

struct nl_sock {
#ifdef _WIN32
    HANDLE handle;
    OVERLAPPED overlapped;
    DWORD read_ioctl;
#else
    int fd;
#endif
    uint32_t next_seq;
    uint32_t pid;
    int protocol;
    unsigned int rcvbuf;        /* Receive buffer size (SO_RCVBUF). */
};

/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */
#define MAX_IOVS 128

/* Maximum number of iovecs that may be passed to sendmsg, clamped to a
 * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */
static int max_iovs;

static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
static void nl_pool_release(struct nl_sock *);

/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...).  Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
int
nl_sock_create(int protocol, struct nl_sock **sockp)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct nl_sock *sock;
#ifndef _WIN32
    struct sockaddr_nl local, remote;
#endif
    socklen_t local_size;
    int rcvbuf;
    int retval = 0;

    if (ovsthread_once_start(&once)) {
        int save_errno = errno;
        errno = 0;

        max_iovs = sysconf(_SC_UIO_MAXIOV);
        if (max_iovs < _XOPEN_IOV_MAX) {
            if (max_iovs == -1 && errno) {
                VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
            }
            max_iovs = _XOPEN_IOV_MAX;
        } else if (max_iovs > MAX_IOVS) {
            max_iovs = MAX_IOVS;
        }

        errno = save_errno;
        ovsthread_once_done(&once);
    }

    *sockp = NULL;
    sock = xmalloc(sizeof *sock);

#ifdef _WIN32
    sock->handle = CreateFile(OVS_DEVICE_NAME_USER,
                              GENERIC_READ | GENERIC_WRITE,
                              FILE_SHARE_READ | FILE_SHARE_WRITE,
                              NULL, OPEN_EXISTING,
                              FILE_FLAG_OVERLAPPED, NULL);

    if (sock->handle == INVALID_HANDLE_VALUE) {
        VLOG_ERR("fcntl: %s", ovs_lasterror_to_string());
        goto error;
    }

    memset(&sock->overlapped, 0, sizeof sock->overlapped);
    sock->overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (sock->overlapped.hEvent == NULL) {
        VLOG_ERR("fcntl: %s", ovs_lasterror_to_string());
        goto error;
    }
    /* Initialize the type/ioctl to Generic */
    sock->read_ioctl = OVS_IOCTL_READ;
#else
    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
    if (sock->fd < 0) {
        VLOG_ERR("fcntl: %s", ovs_strerror(errno));
        goto error;
    }
#endif

    sock->protocol = protocol;
    sock->next_seq = 1;

    rcvbuf = 1024 * 1024;
#ifdef _WIN32
    sock->rcvbuf = rcvbuf;
    retval = get_sock_pid_from_kernel(sock);
    if (retval != 0) {
        goto error;
    }
#else
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        /* Only root can use SO_RCVBUFFORCE.  Everyone else gets EPERM.
         * Warn only if the failure is therefore unexpected. */
        if (errno != EPERM) {
            VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
                         "(%s)", rcvbuf, ovs_strerror(errno));
        }
    }

    retval = get_socket_rcvbuf(sock->fd);
    if (retval < 0) {
        retval = -retval;
        goto error;
    }
    sock->rcvbuf = retval;
    retval = 0;

    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    remote.nl_pid = 0;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", ovs_strerror(errno));
        goto error;
    }

    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", ovs_strerror(errno));
        goto error;
    }
    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");
        retval = EINVAL;
        goto error;
    }
    sock->pid = local.nl_pid;
#endif

    *sockp = sock;
    return 0;

error:
    if (retval == 0) {
        retval = errno;
        if (retval == 0) {
            retval = EINVAL;
        }
    }
#ifdef _WIN32
    if (sock->overlapped.hEvent) {
        CloseHandle(sock->overlapped.hEvent);
    }
    if (sock->handle != INVALID_HANDLE_VALUE) {
        CloseHandle(sock->handle);
    }
#else
    if (sock->fd >= 0) {
        close(sock->fd);
    }
#endif
    free(sock);
    return retval;
}

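/* Example: a minimal sketch of creating and tearing down a Netlink socket
 * with the functions in this file.  (Illustrative only; a real caller would
 * do something useful between the two calls and handle the error according
 * to its own needs.)
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (!error) {
 *         ...use 'sock' with the send/recv/transact functions below...
 *         nl_sock_destroy(sock);
 *     }
 */
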
/* Creates a new netlink socket for the same protocol as 'src'.  Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
int
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
{
    return nl_sock_create(src->protocol, sockp);
}

/* Destroys netlink socket 'sock'. */
void
nl_sock_destroy(struct nl_sock *sock)
{
    if (sock) {
#ifdef _WIN32
        if (sock->overlapped.hEvent) {
            CloseHandle(sock->overlapped.hEvent);
        }
        CloseHandle(sock->handle);
#else
        close(sock->fd);
#endif
        free(sock);
    }
}

#ifdef _WIN32
/* Reads the pid for 'sock' generated in the kernel datapath.  The function
 * uses a separate IOCTL instead of transaction semantics to avoid unnecessary
 * message overhead. */
static int
get_sock_pid_from_kernel(struct nl_sock *sock)
{
    uint32_t pid = 0;
    int retval = 0;
    DWORD bytes = 0;

    if (!DeviceIoControl(sock->handle, OVS_IOCTL_GET_PID,
                         NULL, 0, &pid, sizeof(pid),
                         &bytes, NULL)) {
        retval = EINVAL;
    } else {
        if (bytes < sizeof(pid)) {
            retval = EINVAL;
        } else {
            sock->pid = pid;
        }
    }

    return retval;
}
#endif /* _WIN32 */

#ifdef _WIN32
static int __inline
nl_sock_mcgroup(struct nl_sock *sock, unsigned int multicast_group, bool join)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    int error;

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);

    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          OVS_CTRL_CMD_MC_SUBSCRIBE_REQ,
                          OVS_WIN_CONTROL_VERSION);

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    nl_msg_put_u32(&request, OVS_NL_ATTR_MCAST_GRP, multicast_group);
    nl_msg_put_u8(&request, OVS_NL_ATTR_MCAST_JOIN, join ? 1 : 0);

    error = nl_sock_send(sock, &request, true);
    ofpbuf_uninit(&request);
    return error;
}
#endif
/* Tries to add 'sock' as a listener for 'multicast_group'.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
int
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
#ifdef _WIN32
    /* Set the socket type as a "multicast" socket */
    sock->read_ioctl = OVS_IOCTL_READ_EVENT;
    int error = nl_sock_mcgroup(sock, multicast_group, true);
    if (error) {
        sock->read_ioctl = OVS_IOCTL_READ;
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, ovs_strerror(error));
        return error;
    }
#else
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
#endif
    return 0;
}

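/* Example: a minimal sketch of subscribing to a multicast group.  The group
 * number would typically come from nl_lookup_genl_mcgroup(), defined later in
 * this file; OVS_VPORT_FAMILY and OVS_VPORT_MCGROUP are just one plausible
 * choice of names.  Note the caveat above: a socket used for notifications
 * should not also be used for transactions or dumps.
 *
 *     unsigned int group;
 *     if (!nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP, &group)
 *         && !nl_sock_join_mcgroup(sock, group)) {
 *         ...'sock' will now receive asynchronous notifications...
 *     }
 */
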
#ifdef _WIN32
int
nl_sock_subscribe_packets(struct nl_sock *sock)
{
    int error;

    if (sock->read_ioctl != OVS_IOCTL_READ) {
        return EINVAL;
    }

    error = nl_sock_subscribe_packet__(sock, true);
    if (error) {
        VLOG_WARN("could not subscribe packets (%s)",
                  ovs_strerror(error));
        return error;
    }
    sock->read_ioctl = OVS_IOCTL_READ_PACKET;

    return 0;
}

int
nl_sock_unsubscribe_packets(struct nl_sock *sock)
{
    ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET);

    int error = nl_sock_subscribe_packet__(sock, false);
    if (error) {
        VLOG_WARN("could not unsubscribe to packets (%s)",
                  ovs_strerror(error));
        return error;
    }

    sock->read_ioctl = OVS_IOCTL_READ;
    return 0;
}

int
nl_sock_subscribe_packet__(struct nl_sock *sock, bool subscribe)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    int error;

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          OVS_CTRL_CMD_PACKET_SUBSCRIBE_REQ,
                          OVS_WIN_CONTROL_VERSION);

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;
    nl_msg_put_u8(&request, OVS_NL_ATTR_PACKET_SUBSCRIBE, subscribe ? 1 : 0);
    nl_msg_put_u32(&request, OVS_NL_ATTR_PACKET_PID, sock->pid);

    error = nl_sock_send(sock, &request, true);
    ofpbuf_uninit(&request);
    return error;
}
#endif

/* Tries to make 'sock' stop listening to 'multicast_group'.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
int
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
#ifdef _WIN32
    int error = nl_sock_mcgroup(sock, multicast_group, false);
    if (error) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, ovs_strerror(error));
        return error;
    }
    sock->read_ioctl = OVS_IOCTL_READ;
#else
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
#endif
    return 0;
}

static int
nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
               uint32_t nlmsg_seq, bool wait)
{
    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
    int error;

    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_seq = nlmsg_seq;
    nlmsg->nlmsg_pid = sock->pid;
    do {
        int retval;
#ifdef _WIN32
        DWORD bytes;

        if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
                             msg->data, msg->size, NULL, 0,
                             &bytes, NULL)) {
            retval = -1;
            /* XXX: Map to a more appropriate error based on GetLastError(). */
            errno = EINVAL;
            VLOG_DBG_RL(&rl, "fatal driver failure in write: %s",
                        ovs_lasterror_to_string());
        } else {
            retval = msg->size;
        }
#else
        retval = send(sock->fd, msg->data, msg->size,
                      wait ? 0 : MSG_DONTWAIT);
#endif
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
    if (!error) {
        COVERAGE_INC(netlink_sent);
    }
    return error;
}

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'.  nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
 * sequence number, before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value.  If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
int
nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
{
    return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
}

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'.  nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
 * 'nlmsg_seq', before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value.  If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
 *
 * This function is suitable for sending a reply to a request that was received
 * with sequence number 'nlmsg_seq'.  Otherwise, use nl_sock_send() instead. */
int
nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
                 uint32_t nlmsg_seq, bool wait)
{
    return nl_sock_send__(sock, msg, nlmsg_seq, wait);
}

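/* Example: a minimal sketch of building and sending a Generic Netlink request
 * with nl_sock_send().  'family', 'MY_CMD' and 'MY_VERSION' are placeholders
 * for a caller-supplied family number, command and version, not names defined
 * by this library.
 *
 *     struct ofpbuf request;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, family, NLM_F_REQUEST,
 *                           MY_CMD, MY_VERSION);
 *     ...append attributes with nl_msg_put_u32(), nl_msg_put_string(), ...
 *     error = nl_sock_send(sock, &request, true);
 *     ofpbuf_uninit(&request);
 */
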
static int
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    /* We can't accurately predict the size of the data to be received.  The
     * caller is supposed to have allocated enough space in 'buf' to handle the
     * "typical" case.  To handle exceptions, we make available enough space in
     * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
     * figure since that's the maximum length of a Netlink attribute). */
    struct nlmsghdr *nlmsghdr;
    uint8_t tail[65536];
    struct iovec iov[2];
    struct msghdr msg;
    ssize_t retval;
    int error;

    ovs_assert(buf->allocated >= sizeof *nlmsghdr);
    ofpbuf_clear(buf);

    iov[0].iov_base = buf->base;
    iov[0].iov_len = buf->allocated;
    iov[1].iov_base = tail;
    iov[1].iov_len = sizeof tail;

    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;

    /* Receive a Netlink message from the kernel.
     *
     * This works around a kernel bug in which the kernel returns an error code
     * as if it were the number of bytes read.  It doesn't actually modify
     * anything in the receive buffer in that case, so we can initialize the
     * Netlink header with an impossible message length and then, upon success,
     * check whether it changed. */
    nlmsghdr = buf->base;
    do {
        nlmsghdr->nlmsg_len = UINT32_MAX;
#ifdef _WIN32
        DWORD bytes;
        if (!DeviceIoControl(sock->handle, sock->read_ioctl,
                             NULL, 0, tail, sizeof tail, &bytes, NULL)) {
            VLOG_DBG_RL(&rl, "fatal driver failure in transact: %s",
                        ovs_lasterror_to_string());
            retval = -1;
            /* XXX: Map to a more appropriate error. */
            errno = EINVAL;
        } else {
            retval = bytes;
            if (retval == 0) {
                retval = -1;
                errno = EAGAIN;
            } else {
                if (retval >= buf->allocated) {
                    ofpbuf_reinit(buf, retval);
                    nlmsghdr = buf->base;
                    nlmsghdr->nlmsg_len = UINT32_MAX;
                }
                memcpy(buf->data, tail, retval);
                buf->size = retval;
            }
        }
#else
        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
#endif
        error = (retval < 0 ? errno
                 : retval == 0 ? ECONNRESET /* not possible? */
                 : nlmsghdr->nlmsg_len != UINT32_MAX ? 0
                 : retval);
    } while (error == EINTR);
    if (error) {
        if (error == ENOBUFS) {
            /* Socket receive buffer overflow dropped one or more messages that
             * the kernel tried to send to us. */
            COVERAGE_INC(netlink_overflow);
        }
        return error;
    }

    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
                    sizeof tail);
        return E2BIG;
    }

    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIuSIZE" bytes < %"PRIuSIZE")",
                    retval, sizeof *nlmsghdr);
        return EPROTO;
    }
#ifndef _WIN32
    buf->size = MIN(retval, buf->allocated);
    if (retval > buf->allocated) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - buf->allocated);
    }
#endif

    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);

    return 0;
}

/* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'.  If
 * 'wait' is true, waits for a message to be ready.  Otherwise, fails with
 * EAGAIN if the 'sock' receive buffer is empty.
 *
 * The caller must have initialized 'buf' with an allocation of at least
 * NLMSG_HDRLEN bytes.  For best performance, the caller should allocate enough
 * space for a "typical" message.
 *
 * On success, returns 0 and replaces 'buf''s previous content by the received
 * message.  This function expands 'buf''s allocated memory, as necessary, to
 * hold the actual size of the received message.
 *
 * On failure, returns a positive errno value and clears 'buf' to zero length.
 * 'buf' retains its previous memory allocation.
 *
 * Regardless of success or failure, this function resets 'buf''s headroom to
 * 0. */
int
nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    return nl_sock_recv__(sock, buf, wait);
}

static void
nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
                        int error)
{
    size_t i;

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        txn->error = error;
        if (txn->reply) {
            ofpbuf_clear(txn->reply);
        }
    }
}

static int
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
                            size_t *done)
{
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;

    uint32_t base_seq;
    struct iovec iovs[MAX_IOVS];
    struct msghdr msg;
    int error;
    int i;

    base_seq = nl_sock_allocate_seq(sock, n);
    *done = 0;
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;
    }

#ifndef _WIN32
    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iovs;
    msg.msg_iovlen = n;
    do {
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        log_nlmsg(__func__, error, txn->request->data,
                  txn->request->size, sock->protocol);
    }
    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }

    if (error) {
        return error;
    }

    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;
    tmp_txn.error = 0;
    while (n > 0) {
        struct nl_transaction *buf_txn, *txn;
        uint32_t seq;

        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        buf_txn = &tmp_txn;
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];
                break;
            }
        }

        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
        if (error) {
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);
                *done += n;
                error = 0;
            }
            break;
        }

        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
            continue;
        }
        i = seq - base_seq;
        txn = transactions[i];

        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
            if (txn->error) {
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            error, ovs_strerror(txn->error));
            }
        } else {
            txn->error = 0;
            if (txn->reply && txn != buf_txn) {
                /* Swap buffers. */
                struct ofpbuf *reply = buf_txn->reply;
                buf_txn->reply = txn->reply;
                txn->reply = reply;
            }
        }

        /* Fill in the results for transactions before 'txn'.  (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);

        /* Advance. */
        *done += i + 1;
        transactions += i + 1;
        n -= i + 1;
        base_seq += i + 1;
    }
    ofpbuf_uninit(&tmp_reply);
#else
    error = 0;
    uint8_t reply_buf[65536];
    for (i = 0; i < n; i++) {
        DWORD reply_len;
        bool ret;
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *request_nlmsg, *reply_nlmsg;

        ret = DeviceIoControl(sock->handle, OVS_IOCTL_TRANSACT,
                              txn->request->data,
                              txn->request->size,
                              reply_buf, sizeof reply_buf,
                              &reply_len, NULL);

        if (ret && reply_len == 0) {
            /*
             * The current transaction did not produce any data to read and
             * that is not an error as such.  Continue with the remainder of
             * the transactions.
             */
            txn->error = 0;
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
        } else if (!ret) {
            /* XXX: Map to a more appropriate error. */
            error = EINVAL;
            VLOG_DBG_RL(&rl, "fatal driver failure: %s",
                        ovs_lasterror_to_string());
            break;
        }

        if (reply_len != 0) {
            /* Look up the request header first, so that its sequence number
             * is available for logging even when the reply is too short. */
            request_nlmsg = nl_msg_nlmsghdr(txn->request);

            if (reply_len < sizeof *reply_nlmsg) {
                nl_sock_record_errors__(transactions, n, 0);
                VLOG_DBG_RL(&rl, "insufficient length of reply %#"PRIu32
                    " for seq: %#"PRIx32, reply_len, request_nlmsg->nlmsg_seq);
                break;
            }

            /* Validate the sequence number in the reply. */
            reply_nlmsg = (struct nlmsghdr *)reply_buf;

            if (request_nlmsg->nlmsg_seq != reply_nlmsg->nlmsg_seq) {
                ovs_assert(request_nlmsg->nlmsg_seq == reply_nlmsg->nlmsg_seq);
                VLOG_DBG_RL(&rl, "mismatched seq request %#"PRIx32
                    ", reply %#"PRIx32, request_nlmsg->nlmsg_seq,
                    reply_nlmsg->nlmsg_seq);
                break;
            }

            /* Handle errors embedded within the netlink message. */
            ofpbuf_use_stub(&tmp_reply, reply_buf, sizeof reply_buf);
            tmp_reply.size = sizeof reply_buf;
            if (nl_msg_nlmsgerr(&tmp_reply, &txn->error)) {
                if (txn->reply) {
                    ofpbuf_clear(txn->reply);
                }
                if (txn->error) {
                    VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                                error, ovs_strerror(txn->error));
                }
            } else {
                txn->error = 0;
                if (txn->reply) {
                    /* Copy the reply to the buffer specified by the caller. */
                    if (reply_len > txn->reply->allocated) {
                        ofpbuf_reinit(txn->reply, reply_len);
                    }
                    memcpy(txn->reply->data, reply_buf, reply_len);
                    txn->reply->size = reply_len;
                }
            }
            ofpbuf_uninit(&tmp_reply);
        }

        /* Count the number of successful transactions. */
        (*done)++;

    }

    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }
#endif

    return error;
}

static void
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
{
    int max_batch_count;
    int error;

    if (!n) {
        return;
    }

    /* In theory, every request could have a 64 kB reply.  But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch".  So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);

    while (n > 0) {
        size_t count, bytes;
        size_t done;

        /* Batch up to 'max_batch_count' transactions.  But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel.  */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
                break;
            }
            bytes += transactions[count]->request->size;
        }

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;
        n -= done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
        } else if (error) {
            VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
            nl_sock_record_errors__(transactions, n, error);
            if (error != EAGAIN) {
                /* A fatal error has occurred.  Abort the rest of
                 * transactions. */
                break;
            }
        }
    }
}

static int
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)
{
    struct nl_transaction *transactionp;
    struct nl_transaction transaction;

    transaction.request = CONST_CAST(struct ofpbuf *, request);
    transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
    transactionp = &transaction;

    nl_sock_transact_multiple(sock, &transactionp, 1);

    if (replyp) {
        if (transaction.error) {
            ofpbuf_delete(transaction.reply);
            *replyp = NULL;
        } else {
            *replyp = transaction.reply;
        }
    }

    return transaction.error;
}

/* Drain all the messages currently in 'sock''s receive queue. */
int
nl_sock_drain(struct nl_sock *sock)
{
#ifdef _WIN32
    return 0;
#else
    return drain_rcvbuf(sock->fd);
#endif
}

/* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
 * Netlink socket created with the given 'protocol', and initializes 'dump' to
 * reflect the state of the operation.
 *
 * 'request' must contain a Netlink message.  Before sending the message,
 * nlmsg_len will be finalized to match request->size, and nlmsg_pid will be
 * set to the Netlink socket's pid.  NLM_F_DUMP and NLM_F_ACK will be set in
 * nlmsg_flags.
 *
 * The design of this Netlink socket library ensures that the dump is reliable.
 *
 * This function provides no status indication.  nl_dump_done() provides an
 * error status for the entire dump operation.
 *
 * The caller must eventually destroy 'request'.
 */
void
nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
{
    nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;

    ovs_mutex_init(&dump->mutex);
    ovs_mutex_lock(&dump->mutex);
    dump->status = nl_pool_alloc(protocol, &dump->sock);
    if (!dump->status) {
        dump->status = nl_sock_send__(dump->sock, request,
                                      nl_sock_allocate_seq(dump->sock, 1),
                                      true);
    }
    dump->nl_seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
    ovs_mutex_unlock(&dump->mutex);
}

static int
nl_dump_refill(struct nl_dump *dump, struct ofpbuf *buffer)
    OVS_REQUIRES(dump->mutex)
{
    struct nlmsghdr *nlmsghdr;
    int error;

    while (!buffer->size) {
        error = nl_sock_recv__(dump->sock, buffer, false);
        if (error) {
            /* The kernel never blocks providing the results of a dump, so
             * error == EAGAIN means that we've read the whole thing, and
             * therefore transform it into EOF.  (The kernel always provides
             * NLMSG_DONE as a sentinel.  Some other thread must have received
             * that already but not yet signaled it in 'status'.)
             *
             * Any other error is just an error. */
            return error == EAGAIN ? EOF : error;
        }

        nlmsghdr = nl_msg_nlmsghdr(buffer);
        if (dump->nl_seq != nlmsghdr->nlmsg_seq) {
            VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                        nlmsghdr->nlmsg_seq, dump->nl_seq);
            ofpbuf_clear(buffer);
        }
    }

    if (nl_msg_nlmsgerr(buffer, &error) && error) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
                     ovs_strerror(error));
        ofpbuf_clear(buffer);
        return error;
    }

    return 0;
}

static int
nl_dump_next__(struct ofpbuf *reply, struct ofpbuf *buffer)
{
    struct nlmsghdr *nlmsghdr = nl_msg_next(buffer, reply);
    if (!nlmsghdr) {
        VLOG_WARN_RL(&rl, "netlink dump contains message fragment");
        return EPROTO;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
        return EOF;
    } else {
        return 0;
    }
}

/* Attempts to retrieve another reply from 'dump' into 'buffer'.  'dump' must
 * have been initialized with nl_dump_start(), and 'buffer' must have been
 * initialized.  'buffer' should be at least NL_DUMP_BUFSIZE bytes long.
 *
 * If successful, returns true and points 'reply->data' and
 * 'reply->size' to the message that was retrieved.  The caller must not
 * modify 'reply' (because it points within 'buffer', which will be used by
 * future calls to this function).
 *
 * On failure, returns false and sets 'reply->data' to NULL and
 * 'reply->size' to 0.  Failure might indicate an actual error or merely
 * the end of replies.  An error status for the entire dump operation is
 * provided when it is completed by calling nl_dump_done().
 *
 * Multiple threads may call this function, passing the same nl_dump, however
 * each must provide independent buffers.  This function may cache multiple
 * replies in the buffer, and these will be processed before more replies are
 * fetched.  When this function returns false, other threads may continue to
 * process replies in their buffers, but they will not fetch more replies.
 */
bool
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply, struct ofpbuf *buffer)
{
    int retval = 0;

    /* If the buffer is empty, refill it.
     *
     * If the buffer is not empty, we don't check the dump's status.
     * Otherwise, we could end up skipping some of the dump results if thread A
     * hits EOF while thread B is in the midst of processing a batch. */
    if (!buffer->size) {
        ovs_mutex_lock(&dump->mutex);
        if (!dump->status) {
            /* Take the mutex here to avoid an in-kernel race.  If two threads
             * try to read from a Netlink dump socket at once, then the socket
             * error can be set to EINVAL, which will be encountered on the
             * next recv on that socket, which could be anywhere due to the way
             * that we pool Netlink sockets.  Serializing the recv calls avoids
             * the issue. */
            dump->status = nl_dump_refill(dump, buffer);
        }
        retval = dump->status;
        ovs_mutex_unlock(&dump->mutex);
    }

    /* Fetch the next message from the buffer. */
    if (!retval) {
        retval = nl_dump_next__(reply, buffer);
        if (retval) {
            /* Record 'retval' as the dump status, but don't overwrite an error
             * with EOF. */
            ovs_mutex_lock(&dump->mutex);
            if (dump->status <= 0) {
                dump->status = retval;
            }
            ovs_mutex_unlock(&dump->mutex);
        }
    }

    if (retval) {
        reply->data = NULL;
        reply->size = 0;
    }
    return !retval;
}

/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start().  Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
int
nl_dump_done(struct nl_dump *dump)
{
    int status;

    ovs_mutex_lock(&dump->mutex);
    status = dump->status;
    ovs_mutex_unlock(&dump->mutex);

    /* Drain any remaining messages that the client didn't read.  Otherwise the
     * kernel will continue to queue them up and waste buffer space.
     *
     * XXX We could just destroy and discard the socket in this case. */
    if (!status) {
        uint64_t tmp_reply_stub[NL_DUMP_BUFSIZE / 8];
        struct ofpbuf reply, buf;

        ofpbuf_use_stub(&buf, tmp_reply_stub, sizeof tmp_reply_stub);
        while (nl_dump_next(dump, &reply, &buf)) {
            /* Nothing to do. */
        }
        ofpbuf_uninit(&buf);

        ovs_mutex_lock(&dump->mutex);
        status = dump->status;
        ovs_mutex_unlock(&dump->mutex);
        ovs_assert(status);
    }

    nl_pool_release(dump->sock);
    ovs_mutex_destroy(&dump->mutex);

    return status == EOF ? 0 : status;
}

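/* Example: a minimal sketch of a complete dump, tying together
 * nl_dump_start(), nl_dump_next() and nl_dump_done().  Construction of
 * 'request' (a Netlink message selecting what to dump) is elided.
 *
 *     uint64_t buf_stub[NL_DUMP_BUFSIZE / 8];
 *     struct ofpbuf buf, reply;
 *     struct nl_dump dump;
 *     int error;
 *
 *     ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
 *     nl_dump_start(&dump, NETLINK_GENERIC, &request);
 *     while (nl_dump_next(&dump, &reply, &buf)) {
 *         ...parse one reply message from 'reply'...
 *     }
 *     ofpbuf_uninit(&buf);
 *     error = nl_dump_done(&dump);
 */
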
#ifdef _WIN32
/* Pend an I/O request in the driver.  The driver completes the I/O whenever
 * an event or a packet is ready to be read.  Once the I/O is completed,
 * the overlapped structure event associated with the pending I/O will be
 * set. */
static int
pend_io_request(struct nl_sock *sock)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    uint32_t seq;
    int retval = 0;
    int error;
    DWORD bytes;
    OVERLAPPED *overlapped = CONST_CAST(OVERLAPPED *, &sock->overlapped);
    uint16_t cmd = OVS_CTRL_CMD_WIN_PEND_PACKET_REQ;

    ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET ||
               sock->read_ioctl == OVS_IOCTL_READ_EVENT);
    if (sock->read_ioctl == OVS_IOCTL_READ_EVENT) {
        cmd = OVS_CTRL_CMD_WIN_PEND_REQ;
    }

    int ovs_msg_size = sizeof (struct nlmsghdr) + sizeof (struct genlmsghdr) +
                       sizeof (struct ovs_header);

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);

    seq = nl_sock_allocate_seq(sock, 1);
    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          cmd, OVS_WIN_CONTROL_VERSION);
    nlmsg = nl_msg_nlmsghdr(&request);
    nlmsg->nlmsg_seq = seq;
    nlmsg->nlmsg_pid = sock->pid;

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
                         request.data, request.size,
                         NULL, 0, &bytes, overlapped)) {
        error = GetLastError();
        /* Check if the I/O got pended */
        if (error != ERROR_IO_INCOMPLETE && error != ERROR_IO_PENDING) {
            VLOG_ERR("nl_sock_wait failed - %s\n", ovs_format_message(error));
            retval = EINVAL;
        }
    } else {
        retval = EAGAIN;
    }

done:
    ofpbuf_uninit(&request);
    return retval;
}
#endif  /* _WIN32 */

/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'.
 * On Windows, 'sock' is not treated as const, and may be modified. */
void
nl_sock_wait(const struct nl_sock *sock, short int events)
{
#ifdef _WIN32
    if (sock->overlapped.Internal != STATUS_PENDING) {
        int ret = pend_io_request(CONST_CAST(struct nl_sock *, sock));
        if (ret == 0) {
            poll_wevent_wait(sock->overlapped.hEvent);
        } else {
            poll_immediate_wake();
        }
    } else {
        poll_wevent_wait(sock->overlapped.hEvent);
    }
#else
    poll_fd_wait(sock->fd, events);
#endif
}

#ifndef _WIN32
/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps.  If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
int
nl_sock_fd(const struct nl_sock *sock)
{
    return sock->fd;
}
#endif

/* Returns the PID associated with this socket. */
uint32_t
nl_sock_pid(const struct nl_sock *sock)
{
    return sock->pid;
}
\f
/* Miscellaneous. */

struct genl_family {
    struct hmap_node hmap_node;
    uint16_t id;
    char *name;
};

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
};

static struct genl_family *
find_genl_family_by_id(uint16_t id)
{
    struct genl_family *family;

    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
                             &genl_families) {
        if (family->id == id) {
            return family;
        }
    }
    return NULL;
}

static void
define_genl_family(uint16_t id, const char *name)
{
    struct genl_family *family = find_genl_family_by_id(id);

    if (family) {
        if (!strcmp(family->name, name)) {
            return;
        }
        free(family->name);
    } else {
        family = xmalloc(sizeof *family);
        family->id = id;
        hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
    }
    family->name = xstrdup(name);
}

static const char *
genl_family_to_name(uint16_t id)
{
    if (id == GENL_ID_CTRL) {
        return "control";
    } else {
        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
    }
}

#ifndef _WIN32
static int
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
{
    struct nl_sock *sock;
    struct ofpbuf request, *reply;
    int error;

    *replyp = NULL;
    error = nl_sock_create(NETLINK_GENERIC, &sock);
    if (error) {
        return error;
    }

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
    if (error) {
        nl_sock_destroy(sock);
        return error;
    }

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);
        return EPROTO;
    }

    nl_sock_destroy(sock);
    *replyp = reply;
    return 0;
}
#else
static int
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
{
    struct nlmsghdr *nlmsg;
    struct ofpbuf *reply;
    int error;
    uint16_t family_id;
    const char *family_name;
    uint32_t family_version;
    uint32_t family_attrmax;
    uint32_t mcgrp_id = OVS_WIN_NL_INVALID_MCGRP_ID;
    const char *mcgrp_name = NULL;

    *replyp = NULL;
    reply = ofpbuf_new(1024);

    /* CTRL_ATTR_MCAST_GROUPS is supported only for VPORT family. */
    if (!strcmp(name, OVS_WIN_CONTROL_FAMILY)) {
        family_id = OVS_WIN_NL_CTRL_FAMILY_ID;
        family_name = OVS_WIN_CONTROL_FAMILY;
        family_version = OVS_WIN_CONTROL_VERSION;
        family_attrmax = OVS_WIN_CONTROL_ATTR_MAX;
    } else if (!strcmp(name, OVS_DATAPATH_FAMILY)) {
        family_id = OVS_WIN_NL_DATAPATH_FAMILY_ID;
        family_name = OVS_DATAPATH_FAMILY;
        family_version = OVS_DATAPATH_VERSION;
        family_attrmax = OVS_DP_ATTR_MAX;
    } else if (!strcmp(name, OVS_PACKET_FAMILY)) {
        family_id = OVS_WIN_NL_PACKET_FAMILY_ID;
        family_name = OVS_PACKET_FAMILY;
        family_version = OVS_PACKET_VERSION;
        family_attrmax = OVS_PACKET_ATTR_MAX;
    } else if (!strcmp(name, OVS_VPORT_FAMILY)) {
        family_id = OVS_WIN_NL_VPORT_FAMILY_ID;
        family_name = OVS_VPORT_FAMILY;
        family_version = OVS_VPORT_VERSION;
        family_attrmax = OVS_VPORT_ATTR_MAX;
        mcgrp_id = OVS_WIN_NL_VPORT_MCGRP_ID;
        mcgrp_name = OVS_VPORT_MCGROUP;
    } else if (!strcmp(name, OVS_FLOW_FAMILY)) {
        family_id = OVS_WIN_NL_FLOW_FAMILY_ID;
        family_name = OVS_FLOW_FAMILY;
        family_version = OVS_FLOW_VERSION;
        family_attrmax = OVS_FLOW_ATTR_MAX;
    } else if (!strcmp(name, OVS_WIN_NETDEV_FAMILY)) {
        family_id = OVS_WIN_NL_NETDEV_FAMILY_ID;
        family_name = OVS_WIN_NETDEV_FAMILY;
        family_version = OVS_WIN_NETDEV_VERSION;
        family_attrmax = OVS_WIN_NETDEV_ATTR_MAX;
    } else {
        ofpbuf_delete(reply);
        return EINVAL;
    }

    nl_msg_put_genlmsghdr(reply, 0, GENL_ID_CTRL, 0,
                          CTRL_CMD_NEWFAMILY, family_version);
    /* CTRL_ATTR_HDRSIZE and CTRL_ATTR_OPS are not populated, but the
     * callers do not seem to need them. */
    nl_msg_put_u16(reply, CTRL_ATTR_FAMILY_ID, family_id);
    nl_msg_put_string(reply, CTRL_ATTR_FAMILY_NAME, family_name);
    nl_msg_put_u32(reply, CTRL_ATTR_VERSION, family_version);
    nl_msg_put_u32(reply, CTRL_ATTR_MAXATTR, family_attrmax);

    if (mcgrp_id != OVS_WIN_NL_INVALID_MCGRP_ID) {
        size_t mcgrp_ofs1 = nl_msg_start_nested(reply, CTRL_ATTR_MCAST_GROUPS);
        size_t mcgrp_ofs2 = nl_msg_start_nested(reply,
            OVS_WIN_NL_VPORT_MCGRP_ID - OVS_WIN_NL_MCGRP_START_ID);
        nl_msg_put_u32(reply, CTRL_ATTR_MCAST_GRP_ID, mcgrp_id);
        ovs_assert(mcgrp_name != NULL);
        nl_msg_put_string(reply, CTRL_ATTR_MCAST_GRP_NAME, mcgrp_name);
        nl_msg_end_nested(reply, mcgrp_ofs2);
        nl_msg_end_nested(reply, mcgrp_ofs1);
    }

    /* Set the total length of the netlink message. */
    nlmsg = nl_msg_nlmsghdr(reply);
    nlmsg->nlmsg_len = reply->size;

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        ofpbuf_delete(reply);
        return EPROTO;
    }

    *replyp = reply;
    return 0;
}
#endif

/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 */
int
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group)
{
    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;
    unsigned int left;
    int error;

    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);
    if (error) {
        return error;
    }

    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        error = EPROTO;
        goto exit;
    }

    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
        };

        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;

        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
            error = EPROTO;
            goto exit;
        }

        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
            *multicast_group =
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
            error = 0;
            goto exit;
        }
    }
    error = EPROTO;

exit:
    ofpbuf_delete(reply);
    return error;
}

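/* Example: a minimal sketch of looking up a multicast group number by name.
 * The OVS vport family and group names used here are only one plausible
 * choice; any Generic Netlink family/group pair works the same way.
 *
 *     unsigned int mcgroup;
 *     int error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
 *                                        &mcgroup);
 *
 * On success, 'mcgroup' is suitable for passing to nl_sock_join_mcgroup(). */
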
/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'.  If successful, returns 0 and the caller
 * may use '*number' as the family number.  On failure, returns a positive
 * errno value and '*number' caches the errno value. */
int
nl_lookup_genl_family(const char *name, int *number)
{
    if (*number == 0) {
        struct nlattr *attrs[ARRAY_SIZE(family_policy)];
        struct ofpbuf *reply;
        int error;

        error = do_lookup_genl_family(name, attrs, &reply);
        if (!error) {
            *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
            define_genl_family(*number, name);
        } else {
            *number = -error;
        }
        ofpbuf_delete(reply);

        ovs_assert(*number != 0);
    }
    return *number > 0 ? 0 : -*number;
}
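
/* Example: a minimal sketch of the caching contract described above.  The
 * caller keeps a single int, initially 0, and passes it on every call; the
 * lookup only hits the kernel the first time.
 *
 *     static int ovs_datapath_family = 0;
 *     int error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
 *                                       &ovs_datapath_family);
 *     if (!error) {
 *         ...use 'ovs_datapath_family' as the nlmsg family number...
 *     }
 */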
\f
struct nl_pool {
    struct nl_sock *socks[16];
    int n;
};

static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);

static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
{
    struct nl_sock *sock = NULL;
    struct nl_pool *pool;

    ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));

    ovs_mutex_lock(&pool_mutex);
    pool = &pools[protocol];
    if (pool->n > 0) {
        sock = pool->socks[--pool->n];
    }
    ovs_mutex_unlock(&pool_mutex);

    if (sock) {
        *sockp = sock;
        return 0;
    } else {
        return nl_sock_create(protocol, sockp);
    }
}

static void
nl_pool_release(struct nl_sock *sock)
{
    if (sock) {
        struct nl_pool *pool = &pools[sock->protocol];

        ovs_mutex_lock(&pool_mutex);
        if (pool->n < ARRAY_SIZE(pool->socks)) {
            pool->socks[pool->n++] = sock;
            sock = NULL;
        }
        ovs_mutex_unlock(&pool_mutex);

        nl_sock_destroy(sock);
    }
}

/* Sends 'request' to the kernel on a Netlink socket for the given 'protocol'
 * (e.g. NETLINK_ROUTE or NETLINK_GENERIC) and waits for a response.  If
 * successful, returns 0.  On failure, returns a positive errno value.
 *
 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
 * on failure '*replyp' is set to NULL.  If 'replyp' is null, then the kernel's
 * reply, if any, is discarded.
 *
 * Before the message is sent, nlmsg_len in 'request' will be finalized to
 * match msg->size, nlmsg_pid will be set to the pid of the socket used
 * for sending the request, and nlmsg_seq will be initialized.
 *
 * The caller is responsible for destroying 'request'.
 *
 * Bare Netlink is an unreliable transport protocol.  This function layers
 * reliable delivery and reply semantics on top of bare Netlink.
 *
 * In Netlink, sending a request to the kernel is reliable enough, because the
 * kernel will tell us if the message cannot be queued (and we will in that
 * case put it on the transmit queue and wait until it can be delivered).
 *
 * Receiving the reply is the real problem: if the socket buffer is full when
 * the kernel tries to send the reply, the reply will be dropped.  However, the
 * kernel sets a flag that a reply has been dropped.  The next call to recv
 * then returns ENOBUFS.  We can then re-send the request.
 *
 * Caveats:
 *
 *      1. Netlink depends on sequence numbers to match up requests and
 *         replies.  The sender of a request supplies a sequence number, and
 *         the reply echoes back that sequence number.
 *
 *         This is fine, but (1) some kernel netlink implementations are
 *         broken, in that they fail to echo sequence numbers and (2) this
 *         function will drop packets with non-matching sequence numbers, so
 *         that only a single request can be usefully transacted at a time.
 *
 *      2. Resending the request causes it to be re-executed, so the request
 *         needs to be idempotent.
 */
int
nl_transact(int protocol, const struct ofpbuf *request,
            struct ofpbuf **replyp)
{
    struct nl_sock *sock;
    int error;

    error = nl_pool_alloc(protocol, &sock);
    if (error) {
        *replyp = NULL;
        return error;
    }

    error = nl_sock_transact(sock, request, replyp);

    nl_pool_release(sock);
    return error;
}

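/* Example: a minimal sketch of a single request/reply transaction through the
 * socket pool.  'family', 'MY_CMD' and 'MY_VERSION' are placeholders for a
 * caller-supplied Generic Netlink family, command and version.
 *
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, family, NLM_F_REQUEST | NLM_F_ECHO,
 *                           MY_CMD, MY_VERSION);
 *     ...append attributes...
 *     error = nl_transact(NETLINK_GENERIC, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         ...parse 'reply'...
 *         ofpbuf_delete(reply);
 *     }
 */
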
/* Sends the 'request' member of the 'n' transactions in 'transactions' on a
 * Netlink socket for the given 'protocol' (e.g. NETLINK_ROUTE or
 * NETLINK_GENERIC), in order, and receives responses to all of them.  Fills in
 * the 'error' member of each transaction with 0 if it was successful,
 * otherwise with a positive errno value.  If 'reply' is nonnull, then it will
 * be filled with the reply if the message receives a detailed reply.  In other
 * cases, i.e. where the request failed or had no reply beyond an indication of
 * success, 'reply' will be cleared if it is nonnull.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size, set nlmsg_pid to the pid of the socket
 * used for the transaction, and initialize nlmsg_seq.
 *
 * Bare Netlink is an unreliable transport protocol.  This function layers
 * reliable delivery and reply semantics on top of bare Netlink.  See
 * nl_transact() for some caveats.
 */
void
nl_transact_multiple(int protocol,
                     struct nl_transaction **transactions, size_t n)
{
    struct nl_sock *sock;
    int error;

    error = nl_pool_alloc(protocol, &sock);
    if (!error) {
        nl_sock_transact_multiple(sock, transactions, n);
        nl_pool_release(sock);
    } else {
        nl_sock_record_errors__(transactions, n, error);
    }
}

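/* Example: a minimal sketch of batching two transactions.  Each 'request' (and
 * optional 'reply') ofpbuf is assumed to have been prepared by the caller as
 * for nl_transact().
 *
 *     struct nl_transaction txns[2], *txnsp[2];
 *
 *     ...fill in txns[0].request and txns[1].request, and set each .reply to
 *        an ofpbuf that should receive the answer, or to NULL to discard it...
 *     txnsp[0] = &txns[0];
 *     txnsp[1] = &txns[1];
 *     nl_transact_multiple(NETLINK_GENERIC, txnsp, 2);
 *     ...check txns[0].error and txns[1].error...
 */
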
\f
static uint32_t
nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
{
    uint32_t seq = sock->next_seq;

    sock->next_seq += n;

    /* Make it impossible for the next request for sequence numbers to wrap
     * around to 0.  Start over with 1 to avoid ever using a sequence number of
     * 0, because the kernel uses sequence number 0 for notifications. */
    if (sock->next_seq >= UINT32_MAX / 2) {
        sock->next_seq = 1;
    }

    return seq;
}

static void
nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
{
    struct nlmsg_flag {
        unsigned int bits;
        const char *name;
    };
    static const struct nlmsg_flag flags[] = {
        { NLM_F_REQUEST, "REQUEST" },
        { NLM_F_MULTI, "MULTI" },
        { NLM_F_ACK, "ACK" },
        { NLM_F_ECHO, "ECHO" },
        { NLM_F_DUMP, "DUMP" },
        { NLM_F_ROOT, "ROOT" },
        { NLM_F_MATCH, "MATCH" },
        { NLM_F_ATOMIC, "ATOMIC" },
    };
    const struct nlmsg_flag *flag;
    uint16_t flags_left;

    ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
                  h->nlmsg_len, h->nlmsg_type);
    if (h->nlmsg_type == NLMSG_NOOP) {
        ds_put_cstr(ds, "(no-op)");
    } else if (h->nlmsg_type == NLMSG_ERROR) {
        ds_put_cstr(ds, "(error)");
    } else if (h->nlmsg_type == NLMSG_DONE) {
        ds_put_cstr(ds, "(done)");
    } else if (h->nlmsg_type == NLMSG_OVERRUN) {
        ds_put_cstr(ds, "(overrun)");
    } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
        ds_put_cstr(ds, "(reserved)");
    } else if (protocol == NETLINK_GENERIC) {
        ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
    } else {
        ds_put_cstr(ds, "(family-defined)");
    }
    ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
    flags_left = h->nlmsg_flags;
    for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
        if ((flags_left & flag->bits) == flag->bits) {
            ds_put_format(ds, "[%s]", flag->name);
            flags_left &= ~flag->bits;
        }
    }
    if (flags_left) {
        ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
    }
    ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
                  h->nlmsg_seq, h->nlmsg_pid);
}

static char *
nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
    if (h) {
        nlmsghdr_to_string(h, protocol, &ds);
        if (h->nlmsg_type == NLMSG_ERROR) {
            const struct nlmsgerr *e;
            e = ofpbuf_at(buffer, NLMSG_HDRLEN,
                          NLMSG_ALIGN(sizeof(struct nlmsgerr)));
            if (e) {
                ds_put_format(&ds, " error(%d", e->error);
                if (e->error < 0) {
                    ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
                }
                ds_put_cstr(&ds, ", in-reply-to(");
                nlmsghdr_to_string(&e->msg, protocol, &ds);
                ds_put_cstr(&ds, "))");
            } else {
                ds_put_cstr(&ds, " error(truncated)");
            }
        } else if (h->nlmsg_type == NLMSG_DONE) {
            int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
            if (error) {
                ds_put_format(&ds, " done(%d", *error);
                if (*error < 0) {
                    ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
                }
                ds_put_cstr(&ds, ")");
            } else {
                ds_put_cstr(&ds, " done(truncated)");
            }
        } else if (protocol == NETLINK_GENERIC) {
            struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
            if (genl) {
                ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
                              genl->cmd, genl->version);
            }
        }
    } else {
        ds_put_cstr(&ds, "nl(truncated)");
    }
    return ds.string;
}

static void
log_nlmsg(const char *function, int error,
          const void *message, size_t size, int protocol)
{
    if (!VLOG_IS_DBG_ENABLED()) {
        return;
    }

    struct ofpbuf buffer = ofpbuf_const_initializer(message, size);
    char *nlmsg = nlmsg_to_string(&buffer, protocol);
    VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
    free(nlmsg);
}