/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netlink-socket.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include "coverage.h"
#include "openvswitch/dynamic-string.h"
#include "hash.h"
#include "openvswitch/hmap.h"
#include "netlink.h"
#include "netlink-protocol.h"
#include "odp-netlink.h"
#include "openvswitch/ofpbuf.h"
#include "ovs-thread.h"
#include "poll-loop.h"
#include "seq.h"
#include "socket-util.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_sent);

/* Linux header file confusion causes this to be undefined. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information. Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);

static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);
#ifdef _WIN32
static int get_sock_pid_from_kernel(struct nl_sock *sock);
static int set_sock_property(struct nl_sock *sock);
#endif
\f
/* Netlink sockets. */

struct nl_sock {
#ifdef _WIN32
    HANDLE handle;
    OVERLAPPED overlapped;
    DWORD read_ioctl;
#else
    int fd;
#endif
    uint32_t next_seq;
    uint32_t pid;
    int protocol;
    unsigned int rcvbuf;        /* Receive buffer size (SO_RCVBUF). */
};

/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */
#define MAX_IOVS 128

/* Maximum number of iovecs that may be passed to sendmsg, capped at a
 * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */
static int max_iovs;

static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
static void nl_pool_release(struct nl_sock *);

/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
int
nl_sock_create(int protocol, struct nl_sock **sockp)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct nl_sock *sock;
#ifndef _WIN32
    struct sockaddr_nl local, remote;
#endif
    socklen_t local_size;
    int rcvbuf;
    int retval = 0;

    if (ovsthread_once_start(&once)) {
        int save_errno = errno;
        errno = 0;

        max_iovs = sysconf(_SC_UIO_MAXIOV);
        if (max_iovs < _XOPEN_IOV_MAX) {
            if (max_iovs == -1 && errno) {
                VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
            }
            max_iovs = _XOPEN_IOV_MAX;
        } else if (max_iovs > MAX_IOVS) {
            max_iovs = MAX_IOVS;
        }

        errno = save_errno;
        ovsthread_once_done(&once);
    }

    *sockp = NULL;
    sock = xmalloc(sizeof *sock);

#ifdef _WIN32
    sock->overlapped.hEvent = NULL;
    sock->handle = CreateFile(OVS_DEVICE_NAME_USER,
                              GENERIC_READ | GENERIC_WRITE,
                              FILE_SHARE_READ | FILE_SHARE_WRITE,
                              NULL, OPEN_EXISTING,
                              FILE_FLAG_OVERLAPPED, NULL);

    if (sock->handle == INVALID_HANDLE_VALUE) {
        VLOG_ERR("CreateFile: %s", ovs_lasterror_to_string());
        goto error;
    }

    memset(&sock->overlapped, 0, sizeof sock->overlapped);
    sock->overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (sock->overlapped.hEvent == NULL) {
        VLOG_ERR("CreateEvent: %s", ovs_lasterror_to_string());
        goto error;
    }
    /* Initialize the type/ioctl to Generic. */
    sock->read_ioctl = OVS_IOCTL_READ;
#else
    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
    if (sock->fd < 0) {
        VLOG_ERR("socket: %s", ovs_strerror(errno));
        goto error;
    }
#endif

    sock->protocol = protocol;
    sock->next_seq = 1;

    rcvbuf = 1024 * 1024;
#ifdef _WIN32
    sock->rcvbuf = rcvbuf;
    retval = get_sock_pid_from_kernel(sock);
    if (retval != 0) {
        goto error;
    }
    retval = set_sock_property(sock);
    if (retval != 0) {
        goto error;
    }
#else
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        /* Only root can use SO_RCVBUFFORCE. Everyone else gets EPERM.
         * Warn only if the failure is therefore unexpected. */
        if (errno != EPERM) {
            VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
                         "(%s)", rcvbuf, ovs_strerror(errno));
        }
    }

    retval = get_socket_rcvbuf(sock->fd);
    if (retval < 0) {
        retval = -retval;
        goto error;
    }
    sock->rcvbuf = retval;
    retval = 0;

    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    remote.nl_pid = 0;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", ovs_strerror(errno));
        goto error;
    }

    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", ovs_strerror(errno));
        goto error;
    }
    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");
        retval = EINVAL;
        goto error;
    }
    sock->pid = local.nl_pid;
#endif

    *sockp = sock;
    return 0;

error:
    if (retval == 0) {
        retval = errno;
        if (retval == 0) {
            retval = EINVAL;
        }
    }
#ifdef _WIN32
    if (sock->overlapped.hEvent) {
        CloseHandle(sock->overlapped.hEvent);
    }
    if (sock->handle != INVALID_HANDLE_VALUE) {
        CloseHandle(sock->handle);
    }
#else
    if (sock->fd >= 0) {
        close(sock->fd);
    }
#endif
    free(sock);
    return retval;
}

/* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
int
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
{
    return nl_sock_create(src->protocol, sockp);
}

/* Destroys netlink socket 'sock'. */
void
nl_sock_destroy(struct nl_sock *sock)
{
    if (sock) {
#ifdef _WIN32
        if (sock->overlapped.hEvent) {
            CloseHandle(sock->overlapped.hEvent);
        }
        CloseHandle(sock->handle);
#else
        close(sock->fd);
#endif
        free(sock);
    }
}

#ifdef _WIN32
/* Reads the pid for 'sock' generated in the kernel datapath. The function
 * uses a separate IOCTL instead of a transaction semantic to avoid unnecessary
 * message overhead. */
static int
get_sock_pid_from_kernel(struct nl_sock *sock)
{
    uint32_t pid = 0;
    int retval = 0;
    DWORD bytes = 0;

    if (!DeviceIoControl(sock->handle, OVS_IOCTL_GET_PID,
                         NULL, 0, &pid, sizeof(pid),
                         &bytes, NULL)) {
        retval = EINVAL;
    } else {
        if (bytes < sizeof(pid)) {
            retval = EINVAL;
        } else {
            sock->pid = pid;
        }
    }

    return retval;
}

/* Sets and manages socket properties in userspace and kernel.
 * Currently two attributes are tracked: pid and protocol.
 * protocol - supplied by userspace based on the netlink family
 *            (e.g. NETLINK_GENERIC or NETLINK_NETFILTER). Windows uses this
 *            property to set the value in the kernel datapath.
 * pid      - generated by the Windows kernel and set in userspace. The
 *            property is not modified.
 * Also verifies that the protocol and pid in the kernel reflect the values in
 * userspace. */
static int
set_sock_property(struct nl_sock *sock)
{
    static const struct nl_policy ovs_socket_policy[] = {
        [OVS_NL_ATTR_SOCK_PROTO] = { .type = NL_A_BE32, .optional = true },
        [OVS_NL_ATTR_SOCK_PID] = { .type = NL_A_BE32, .optional = true }
    };

    struct ofpbuf request, *reply;
    struct ovs_header *ovs_header;
    struct nlattr *attrs[ARRAY_SIZE(ovs_socket_policy)];
    int retval = 0;
    int error;

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          OVS_CTRL_CMD_SOCK_PROP, OVS_WIN_CONTROL_VERSION);
    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    nl_msg_put_be32(&request, OVS_NL_ATTR_SOCK_PROTO, sock->protocol);
    /* pid is already set as part of get_sock_pid_from_kernel().
     * It is put here to maintain consistency. */
    nl_msg_put_be32(&request, OVS_NL_ATTR_SOCK_PID, sock->pid);

    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
    if (error) {
        return EINVAL;
    }

    if (!nl_policy_parse(reply,
                         NLMSG_HDRLEN + GENL_HDRLEN + sizeof *ovs_header,
                         ovs_socket_policy, attrs,
                         ARRAY_SIZE(ovs_socket_policy))) {
        ofpbuf_delete(reply);
        return EINVAL;
    }
    /* Verify that the properties are set up properly. */
    if (attrs[OVS_NL_ATTR_SOCK_PROTO]) {
        int protocol = nl_attr_get_be32(attrs[OVS_NL_ATTR_SOCK_PROTO]);
        if (protocol != sock->protocol) {
            VLOG_ERR("Invalid protocol returned:%d expected:%d",
                     protocol, sock->protocol);
            retval = EINVAL;
        }
    }

    if (attrs[OVS_NL_ATTR_SOCK_PID]) {
        int pid = nl_attr_get_be32(attrs[OVS_NL_ATTR_SOCK_PID]);
        if (pid != sock->pid) {
            VLOG_ERR("Invalid pid returned:%d expected:%d",
                     pid, sock->pid);
            retval = EINVAL;
        }
    }

    ofpbuf_delete(reply);
    return retval;
}
#endif /* _WIN32 */

#ifdef _WIN32
static int __inline
nl_sock_mcgroup(struct nl_sock *sock, unsigned int multicast_group, bool join)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    int error;

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);

    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          OVS_CTRL_CMD_MC_SUBSCRIBE_REQ,
                          OVS_WIN_CONTROL_VERSION);

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;

    nl_msg_put_u32(&request, OVS_NL_ATTR_MCAST_GRP, multicast_group);
    nl_msg_put_u8(&request, OVS_NL_ATTR_MCAST_JOIN, join ? 1 : 0);

    error = nl_sock_send(sock, &request, true);
    ofpbuf_uninit(&request);
    return error;
}
#endif
/* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
int
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
#ifdef _WIN32
    /* Set the socket type as a "multicast" socket */
    sock->read_ioctl = OVS_IOCTL_READ_EVENT;
    int error = nl_sock_mcgroup(sock, multicast_group, true);
    if (error) {
        sock->read_ioctl = OVS_IOCTL_READ;
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, ovs_strerror(error));
        return error;
    }
#else
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
#endif
    return 0;
}

#ifdef _WIN32
int
nl_sock_subscribe_packets(struct nl_sock *sock)
{
    int error;

    if (sock->read_ioctl != OVS_IOCTL_READ) {
        return EINVAL;
    }

    error = nl_sock_subscribe_packet__(sock, true);
    if (error) {
        VLOG_WARN("could not subscribe packets (%s)",
                  ovs_strerror(error));
        return error;
    }
    sock->read_ioctl = OVS_IOCTL_READ_PACKET;

    return 0;
}

int
nl_sock_unsubscribe_packets(struct nl_sock *sock)
{
    ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET);

    int error = nl_sock_subscribe_packet__(sock, false);
    if (error) {
        VLOG_WARN("could not unsubscribe packets (%s)",
                  ovs_strerror(error));
        return error;
    }

    sock->read_ioctl = OVS_IOCTL_READ;
    return 0;
}

int
nl_sock_subscribe_packet__(struct nl_sock *sock, bool subscribe)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    int error;

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          OVS_CTRL_CMD_PACKET_SUBSCRIBE_REQ,
                          OVS_WIN_CONTROL_VERSION);

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;
    nl_msg_put_u8(&request, OVS_NL_ATTR_PACKET_SUBSCRIBE, subscribe ? 1 : 0);
    nl_msg_put_u32(&request, OVS_NL_ATTR_PACKET_PID, sock->pid);

    error = nl_sock_send(sock, &request, true);
    ofpbuf_uninit(&request);
    return error;
}
#endif

/* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
int
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
#ifdef _WIN32
    int error = nl_sock_mcgroup(sock, multicast_group, false);
    if (error) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, ovs_strerror(error));
        return error;
    }
    sock->read_ioctl = OVS_IOCTL_READ;
#else
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
#endif
    return 0;
}

static int
nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
               uint32_t nlmsg_seq, bool wait)
{
    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
    int error;

    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_seq = nlmsg_seq;
    nlmsg->nlmsg_pid = sock->pid;
    do {
        int retval;
#ifdef _WIN32
        DWORD bytes;

        if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
                             msg->data, msg->size, NULL, 0,
                             &bytes, NULL)) {
            retval = -1;
            /* XXX: Map to a more appropriate error based on GetLastError(). */
            errno = EINVAL;
            VLOG_DBG_RL(&rl, "fatal driver failure in write: %s",
                        ovs_lasterror_to_string());
        } else {
            retval = msg->size;
        }
#else
        retval = send(sock->fd, msg->data, msg->size,
                      wait ? 0 : MSG_DONTWAIT);
#endif
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
    if (!error) {
        COVERAGE_INC(netlink_sent);
    }
    return error;
}

558
c6eab56d 559/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
6fd6ed71 560 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
ff459dd6
BP
561 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
562 * sequence number, before the message is sent.
c6eab56d
BP
563 *
564 * Returns 0 if successful, otherwise a positive errno value. If
565 * 'wait' is true, then the send will wait until buffer space is ready;
566 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
567int
568nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
ff459dd6
BP
569{
570 return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
571}
572
573/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
6fd6ed71 574 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
ff459dd6
BP
575 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
576 * 'nlmsg_seq', before the message is sent.
577 *
578 * Returns 0 if successful, otherwise a positive errno value. If
579 * 'wait' is true, then the send will wait until buffer space is ready;
580 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
581 *
582 * This function is suitable for sending a reply to a request that was received
583 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
584int
585nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
586 uint32_t nlmsg_seq, bool wait)
c6eab56d 587{
ff459dd6 588 return nl_sock_send__(sock, msg, nlmsg_seq, wait);
c6eab56d
BP
589}
590
static int
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    /* We can't accurately predict the size of the data to be received. The
     * caller is supposed to have allocated enough space in 'buf' to handle the
     * "typical" case. To handle exceptions, we make available enough space in
     * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
     * figure since that's the maximum length of a Netlink attribute). */
    struct nlmsghdr *nlmsghdr;
    uint8_t tail[65536];
    struct iovec iov[2];
    struct msghdr msg;
    ssize_t retval;
    int error;

    ovs_assert(buf->allocated >= sizeof *nlmsghdr);
    ofpbuf_clear(buf);

    iov[0].iov_base = buf->base;
    iov[0].iov_len = buf->allocated;
    iov[1].iov_base = tail;
    iov[1].iov_len = sizeof tail;

    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;

    /* Receive a Netlink message from the kernel.
     *
     * This works around a kernel bug in which the kernel returns an error code
     * as if it were the number of bytes read. It doesn't actually modify
     * anything in the receive buffer in that case, so we can initialize the
     * Netlink header with an impossible message length and then, upon success,
     * check whether it changed. */
    nlmsghdr = buf->base;
    do {
        nlmsghdr->nlmsg_len = UINT32_MAX;
#ifdef _WIN32
        DWORD bytes;
        if (!DeviceIoControl(sock->handle, sock->read_ioctl,
                             NULL, 0, tail, sizeof tail, &bytes, NULL)) {
            VLOG_DBG_RL(&rl, "fatal driver failure in transact: %s",
                        ovs_lasterror_to_string());
            retval = -1;
            /* XXX: Map to a more appropriate error. */
            errno = EINVAL;
        } else {
            retval = bytes;
            if (retval == 0) {
                retval = -1;
                errno = EAGAIN;
            } else {
                if (retval >= buf->allocated) {
                    ofpbuf_reinit(buf, retval);
                    nlmsghdr = buf->base;
                    nlmsghdr->nlmsg_len = UINT32_MAX;
                }
                memcpy(buf->data, tail, retval);
                buf->size = retval;
            }
        }
#else
        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
#endif
        error = (retval < 0 ? errno
                 : retval == 0 ? ECONNRESET /* not possible? */
                 : nlmsghdr->nlmsg_len != UINT32_MAX ? 0
                 : retval);
    } while (error == EINTR);
    if (error) {
        if (error == ENOBUFS) {
            /* Socket receive buffer overflow dropped one or more messages that
             * the kernel tried to send to us. */
            COVERAGE_INC(netlink_overflow);
        }
        return error;
    }

    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
                    sizeof tail);
        return E2BIG;
    }

    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIuSIZE" bytes < %"PRIuSIZE")",
                    retval, sizeof *nlmsghdr);
        return EPROTO;
    }
#ifndef _WIN32
    buf->size = MIN(retval, buf->allocated);
    if (retval > buf->allocated) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - buf->allocated);
    }
#endif

    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);

    return 0;
}

/* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
 * EAGAIN if the 'sock' receive buffer is empty.
 *
 * The caller must have initialized 'buf' with an allocation of at least
 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
 * space for a "typical" message.
 *
 * On success, returns 0 and replaces 'buf''s previous content by the received
 * message. This function expands 'buf''s allocated memory, as necessary, to
 * hold the actual size of the received message.
 *
 * On failure, returns a positive errno value and clears 'buf' to zero length.
 * 'buf' retains its previous memory allocation.
 *
 * Regardless of success or failure, this function resets 'buf''s headroom to
 * 0. */
int
nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    return nl_sock_recv__(sock, buf, wait);
}
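
/* Illustrative sketch (not part of the original implementation): one way a
 * caller might pair a stack-backed ofpbuf with nl_sock_recv(), following the
 * buffer requirements described above. The 2 kB stub size is an assumed
 * "typical" message size, not a value mandated by this API.
 *
 *     uint64_t stub[2048 / 8];
 *     struct ofpbuf buf;
 *     int error;
 *
 *     ofpbuf_use_stub(&buf, stub, sizeof stub);
 *     error = nl_sock_recv(sock, &buf, false);    // non-blocking receive
 *     if (!error) {
 *         // ... parse the Netlink message now held in 'buf' ...
 *     }
 *     ofpbuf_uninit(&buf);
 */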

static void
nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
                        int error)
{
    size_t i;

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        txn->error = error;
        if (txn->reply) {
            ofpbuf_clear(txn->reply);
        }
    }
}

static int
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
                            size_t *done)
{
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;

    uint32_t base_seq;
    struct iovec iovs[MAX_IOVS];
    struct msghdr msg;
    int error;
    int i;

    base_seq = nl_sock_allocate_seq(sock, n);
    *done = 0;
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;
    }

#ifndef _WIN32
    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iovs;
    msg.msg_iovlen = n;
    do {
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        log_nlmsg(__func__, error, txn->request->data,
                  txn->request->size, sock->protocol);
    }
    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }

    if (error) {
        return error;
    }

    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;
    tmp_txn.error = 0;
    while (n > 0) {
        struct nl_transaction *buf_txn, *txn;
        uint32_t seq;

        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        buf_txn = &tmp_txn;
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];
                break;
            }
        }

        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
        if (error) {
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);
                *done += n;
                error = 0;
            }
            break;
        }

        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
            continue;
        }
        i = seq - base_seq;
        txn = transactions[i];

        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
            if (txn->error) {
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            error, ovs_strerror(txn->error));
            }
        } else {
            txn->error = 0;
            if (txn->reply && txn != buf_txn) {
                /* Swap buffers. */
                struct ofpbuf *reply = buf_txn->reply;
                buf_txn->reply = txn->reply;
                txn->reply = reply;
            }
        }

        /* Fill in the results for transactions before 'txn'. (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);

        /* Advance. */
        *done += i + 1;
        transactions += i + 1;
        n -= i + 1;
        base_seq += i + 1;
    }
    ofpbuf_uninit(&tmp_reply);
#else
    error = 0;
    uint8_t reply_buf[65536];
    for (i = 0; i < n; i++) {
        DWORD reply_len;
        bool ret;
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *request_nlmsg, *reply_nlmsg;

        ret = DeviceIoControl(sock->handle, OVS_IOCTL_TRANSACT,
                              txn->request->data,
                              txn->request->size,
                              reply_buf, sizeof reply_buf,
                              &reply_len, NULL);

        if (ret && reply_len == 0) {
            /* The current transaction did not produce any data to read and
             * that is not an error as such. Continue with the remainder of
             * the transactions. */
            txn->error = 0;
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
        } else if (!ret) {
            /* XXX: Map to a more appropriate error. */
            error = EINVAL;
            VLOG_DBG_RL(&rl, "fatal driver failure: %s",
                        ovs_lasterror_to_string());
            break;
        }

        if (reply_len != 0) {
            request_nlmsg = nl_msg_nlmsghdr(txn->request);

            if (reply_len < sizeof *reply_nlmsg) {
                nl_sock_record_errors__(transactions, n, 0);
                VLOG_DBG_RL(&rl, "insufficient length of reply %#"PRIu32
                            " for seq: %#"PRIx32, reply_len,
                            request_nlmsg->nlmsg_seq);
                break;
            }

            /* Validate the sequence number in the reply. */
            reply_nlmsg = (struct nlmsghdr *)reply_buf;

            if (request_nlmsg->nlmsg_seq != reply_nlmsg->nlmsg_seq) {
                ovs_assert(request_nlmsg->nlmsg_seq == reply_nlmsg->nlmsg_seq);
                VLOG_DBG_RL(&rl, "mismatched seq request %#"PRIx32
                            ", reply %#"PRIx32, request_nlmsg->nlmsg_seq,
                            reply_nlmsg->nlmsg_seq);
                break;
            }

            /* Handle errors embedded within the netlink message. */
            ofpbuf_use_stub(&tmp_reply, reply_buf, sizeof reply_buf);
            tmp_reply.size = sizeof reply_buf;
            if (nl_msg_nlmsgerr(&tmp_reply, &txn->error)) {
                if (txn->reply) {
                    ofpbuf_clear(txn->reply);
                }
                if (txn->error) {
                    VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                                error, ovs_strerror(txn->error));
                }
            } else {
                txn->error = 0;
                if (txn->reply) {
                    /* Copy the reply to the buffer specified by the caller. */
                    if (reply_len > txn->reply->allocated) {
                        ofpbuf_reinit(txn->reply, reply_len);
                    }
                    memcpy(txn->reply->data, reply_buf, reply_len);
                    txn->reply->size = reply_len;
                }
            }
            ofpbuf_uninit(&tmp_reply);
        }

        /* Count the number of successful transactions. */
        (*done)++;
    }

    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }
#endif

    return error;
}

static void
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
{
    int max_batch_count;
    int error;

    if (!n) {
        return;
    }

    /* In theory, every request could have a 64 kB reply. But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch". So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);

    while (n > 0) {
        size_t count, bytes;
        size_t done;

        /* Batch up to 'max_batch_count' transactions. But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel. */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
                break;
            }
            bytes += transactions[count]->request->size;
        }

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;
        n -= done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
        } else if (error) {
            VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
            nl_sock_record_errors__(transactions, n, error);
            if (error != EAGAIN) {
                /* A fatal error has occurred. Abort the rest of
                 * transactions. */
                break;
            }
        }
    }
}

static int
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)
{
    struct nl_transaction *transactionp;
    struct nl_transaction transaction;

    transaction.request = CONST_CAST(struct ofpbuf *, request);
    transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
    transactionp = &transaction;

    nl_sock_transact_multiple(sock, &transactionp, 1);

    if (replyp) {
        if (transaction.error) {
            ofpbuf_delete(transaction.reply);
            *replyp = NULL;
        } else {
            *replyp = transaction.reply;
        }
    }

    return transaction.error;
}

/* Drain all the messages currently in 'sock''s receive queue. */
int
nl_sock_drain(struct nl_sock *sock)
{
#ifdef _WIN32
    return 0;
#else
    return drain_rcvbuf(sock->fd);
#endif
}

/* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
 * Netlink socket created with the given 'protocol', and initializes 'dump' to
 * reflect the state of the operation.
 *
 * 'request' must contain a Netlink message. Before sending the message,
 * nlmsg_len will be finalized to match request->size, and nlmsg_pid will be
 * set to the Netlink socket's pid. NLM_F_DUMP and NLM_F_ACK will be set in
 * nlmsg_flags.
 *
 * The design of this Netlink socket library ensures that the dump is reliable.
 *
 * This function provides no status indication. nl_dump_done() provides an
 * error status for the entire dump operation.
 *
 * The caller must eventually destroy 'request'.
 */
void
nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
{
    nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;

    ovs_mutex_init(&dump->mutex);
    ovs_mutex_lock(&dump->mutex);
    dump->status = nl_pool_alloc(protocol, &dump->sock);
    if (!dump->status) {
        dump->status = nl_sock_send__(dump->sock, request,
                                      nl_sock_allocate_seq(dump->sock, 1),
                                      true);
    }
    dump->nl_seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
    ovs_mutex_unlock(&dump->mutex);
}

static int
nl_dump_refill(struct nl_dump *dump, struct ofpbuf *buffer)
    OVS_REQUIRES(dump->mutex)
{
    struct nlmsghdr *nlmsghdr;
    int error;

    while (!buffer->size) {
        error = nl_sock_recv__(dump->sock, buffer, false);
        if (error) {
            /* The kernel never blocks providing the results of a dump, so
             * error == EAGAIN means that we've read the whole thing, and
             * therefore transform it into EOF. (The kernel always provides
             * NLMSG_DONE as a sentinel. Some other thread must have received
             * that already but not yet signaled it in 'status'.)
             *
             * Any other error is just an error. */
            return error == EAGAIN ? EOF : error;
        }

        nlmsghdr = nl_msg_nlmsghdr(buffer);
        if (dump->nl_seq != nlmsghdr->nlmsg_seq) {
            VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                        nlmsghdr->nlmsg_seq, dump->nl_seq);
            ofpbuf_clear(buffer);
        }
    }

    if (nl_msg_nlmsgerr(buffer, &error) && error) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
                     ovs_strerror(error));
        ofpbuf_clear(buffer);
        return error;
    }

    return 0;
}

static int
nl_dump_next__(struct ofpbuf *reply, struct ofpbuf *buffer)
{
    struct nlmsghdr *nlmsghdr = nl_msg_next(buffer, reply);
    if (!nlmsghdr) {
        VLOG_WARN_RL(&rl, "netlink dump contains message fragment");
        return EPROTO;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
        return EOF;
    } else {
        return 0;
    }
}

/* Attempts to retrieve another reply from 'dump' into 'buffer'. 'dump' must
 * have been initialized with nl_dump_start(), and 'buffer' must have been
 * initialized. 'buffer' should be at least NL_DUMP_BUFSIZE bytes long.
 *
 * If successful, returns true and points 'reply->data' and
 * 'reply->size' to the message that was retrieved. The caller must not
 * modify 'reply' (because it points within 'buffer', which will be used by
 * future calls to this function).
 *
 * On failure, returns false and sets 'reply->data' to NULL and
 * 'reply->size' to 0. Failure might indicate an actual error or merely
 * the end of replies. An error status for the entire dump operation is
 * provided when it is completed by calling nl_dump_done().
 *
 * Multiple threads may call this function, passing the same nl_dump, however
 * each must provide independent buffers. This function may cache multiple
 * replies in the buffer, and these will be processed before more replies are
 * fetched. When this function returns false, other threads may continue to
 * process replies in their buffers, but they will not fetch more replies.
 */
bool
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply, struct ofpbuf *buffer)
{
    int retval = 0;

    /* If the buffer is empty, refill it.
     *
     * If the buffer is not empty, we don't check the dump's status.
     * Otherwise, we could end up skipping some of the dump results if thread A
     * hits EOF while thread B is in the midst of processing a batch. */
    if (!buffer->size) {
        ovs_mutex_lock(&dump->mutex);
        if (!dump->status) {
            /* Take the mutex here to avoid an in-kernel race. If two threads
             * try to read from a Netlink dump socket at once, then the socket
             * error can be set to EINVAL, which will be encountered on the
             * next recv on that socket, which could be anywhere due to the way
             * that we pool Netlink sockets. Serializing the recv calls avoids
             * the issue. */
            dump->status = nl_dump_refill(dump, buffer);
        }
        retval = dump->status;
        ovs_mutex_unlock(&dump->mutex);
    }

    /* Fetch the next message from the buffer. */
    if (!retval) {
        retval = nl_dump_next__(reply, buffer);
        if (retval) {
            /* Record 'retval' as the dump status, but don't overwrite an error
             * with EOF. */
            ovs_mutex_lock(&dump->mutex);
            if (dump->status <= 0) {
                dump->status = retval;
            }
            ovs_mutex_unlock(&dump->mutex);
        }
    }

    if (retval) {
        reply->data = NULL;
        reply->size = 0;
    }
    return !retval;
}

/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
int
nl_dump_done(struct nl_dump *dump)
{
    int status;

    ovs_mutex_lock(&dump->mutex);
    status = dump->status;
    ovs_mutex_unlock(&dump->mutex);

    /* Drain any remaining messages that the client didn't read. Otherwise the
     * kernel will continue to queue them up and waste buffer space.
     *
     * XXX We could just destroy and discard the socket in this case. */
    if (!status) {
        uint64_t tmp_reply_stub[NL_DUMP_BUFSIZE / 8];
        struct ofpbuf reply, buf;

        ofpbuf_use_stub(&buf, tmp_reply_stub, sizeof tmp_reply_stub);
        while (nl_dump_next(dump, &reply, &buf)) {
            /* Nothing to do. */
        }
        ofpbuf_uninit(&buf);

        ovs_mutex_lock(&dump->mutex);
        status = dump->status;
        ovs_mutex_unlock(&dump->mutex);
        ovs_assert(status);
    }

    nl_pool_release(dump->sock);
    ovs_mutex_destroy(&dump->mutex);

    return status == EOF ? 0 : status;
}
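
/* Illustrative sketch (not part of the original implementation): the dump API
 * above is normally driven as a start/next/done loop, mirroring the drain
 * loop inside nl_dump_done(). Building the request is elided, and
 * NETLINK_GENERIC is only an example protocol.
 *
 *     uint64_t stub[NL_DUMP_BUFSIZE / 8];
 *     struct ofpbuf request, buf, reply;
 *     struct nl_dump dump;
 *
 *     // ... put a dumpable Netlink request into 'request' ...
 *     nl_dump_start(&dump, NETLINK_GENERIC, &request);
 *     ofpbuf_uninit(&request);
 *
 *     ofpbuf_use_stub(&buf, stub, sizeof stub);
 *     while (nl_dump_next(&dump, &reply, &buf)) {
 *         // ... examine the message in 'reply' (do not modify it) ...
 *     }
 *     ofpbuf_uninit(&buf);
 *     return nl_dump_done(&dump);
 */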

#ifdef _WIN32
/* Pends an I/O request in the driver. The driver completes the I/O whenever
 * an event or a packet is ready to be read. Once the I/O is completed, the
 * overlapped structure's event associated with the pending I/O will be set. */
static int
pend_io_request(struct nl_sock *sock)
{
    struct ofpbuf request;
    uint64_t request_stub[128];
    struct ovs_header *ovs_header;
    struct nlmsghdr *nlmsg;
    uint32_t seq;
    int retval = 0;
    int error;
    DWORD bytes;
    OVERLAPPED *overlapped = CONST_CAST(OVERLAPPED *, &sock->overlapped);
    uint16_t cmd = OVS_CTRL_CMD_WIN_PEND_PACKET_REQ;

    ovs_assert(sock->read_ioctl == OVS_IOCTL_READ_PACKET ||
               sock->read_ioctl == OVS_IOCTL_READ_EVENT);
    if (sock->read_ioctl == OVS_IOCTL_READ_EVENT) {
        cmd = OVS_CTRL_CMD_WIN_PEND_REQ;
    }

    int ovs_msg_size = sizeof (struct nlmsghdr) + sizeof (struct genlmsghdr) +
                       sizeof (struct ovs_header);

    ofpbuf_use_stub(&request, request_stub, sizeof request_stub);

    seq = nl_sock_allocate_seq(sock, 1);
    nl_msg_put_genlmsghdr(&request, 0, OVS_WIN_NL_CTRL_FAMILY_ID, 0,
                          cmd, OVS_WIN_CONTROL_VERSION);
    nlmsg = nl_msg_nlmsghdr(&request);
    nlmsg->nlmsg_seq = seq;
    nlmsg->nlmsg_pid = sock->pid;

    ovs_header = ofpbuf_put_uninit(&request, sizeof *ovs_header);
    ovs_header->dp_ifindex = 0;
    nlmsg->nlmsg_len = request.size;

    if (!DeviceIoControl(sock->handle, OVS_IOCTL_WRITE,
                         request.data, request.size,
                         NULL, 0, &bytes, overlapped)) {
        error = GetLastError();
        /* Check if the I/O got pended. */
        if (error != ERROR_IO_INCOMPLETE && error != ERROR_IO_PENDING) {
            VLOG_ERR("nl_sock_wait failed - %s\n", ovs_format_message(error));
            retval = EINVAL;
        }
    } else {
        retval = EAGAIN;
    }

done:
    ofpbuf_uninit(&request);
    return retval;
}
#endif /* _WIN32 */

/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'.
 * On Windows, 'sock' is not treated as const, and may be modified. */
void
nl_sock_wait(const struct nl_sock *sock, short int events)
{
#ifdef _WIN32
    if (sock->overlapped.Internal != STATUS_PENDING) {
        int ret = pend_io_request(CONST_CAST(struct nl_sock *, sock));
        if (ret == 0) {
            poll_wevent_wait(sock->overlapped.hEvent);
        } else {
            poll_immediate_wake();
        }
    } else {
        poll_wevent_wait(sock->overlapped.hEvent);
    }
#else
    poll_fd_wait(sock->fd, events);
#endif
}

#ifndef _WIN32
/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps. If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
int
nl_sock_fd(const struct nl_sock *sock)
{
    return sock->fd;
}
#endif

/* Returns the PID associated with this socket. */
uint32_t
nl_sock_pid(const struct nl_sock *sock)
{
    return sock->pid;
}
\f
/* Miscellaneous. */

struct genl_family {
    struct hmap_node hmap_node;
    uint16_t id;
    char *name;
};

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
};

static struct genl_family *
find_genl_family_by_id(uint16_t id)
{
    struct genl_family *family;

    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
                             &genl_families) {
        if (family->id == id) {
            return family;
        }
    }
    return NULL;
}

static void
define_genl_family(uint16_t id, const char *name)
{
    struct genl_family *family = find_genl_family_by_id(id);

    if (family) {
        if (!strcmp(family->name, name)) {
            return;
        }
        free(family->name);
    } else {
        family = xmalloc(sizeof *family);
        family->id = id;
        hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
    }
    family->name = xstrdup(name);
}

static const char *
genl_family_to_name(uint16_t id)
{
    if (id == GENL_ID_CTRL) {
        return "control";
    } else {
        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
    }
}

#ifndef _WIN32
static int
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
{
    struct nl_sock *sock;
    struct ofpbuf request, *reply;
    int error;

    *replyp = NULL;
    error = nl_sock_create(NETLINK_GENERIC, &sock);
    if (error) {
        return error;
    }

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
    if (error) {
        nl_sock_destroy(sock);
        return error;
    }

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);
        return EPROTO;
    }

    nl_sock_destroy(sock);
    *replyp = reply;
    return 0;
}
#else
static int
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
{
    struct nlmsghdr *nlmsg;
    struct ofpbuf *reply;
    int error;
    uint16_t family_id;
    const char *family_name;
    uint32_t family_version;
    uint32_t family_attrmax;
    uint32_t mcgrp_id = OVS_WIN_NL_INVALID_MCGRP_ID;
    const char *mcgrp_name = NULL;

    *replyp = NULL;
    reply = ofpbuf_new(1024);

    /* CTRL_ATTR_MCAST_GROUPS is supported only for VPORT family. */
    if (!strcmp(name, OVS_WIN_CONTROL_FAMILY)) {
        family_id = OVS_WIN_NL_CTRL_FAMILY_ID;
        family_name = OVS_WIN_CONTROL_FAMILY;
        family_version = OVS_WIN_CONTROL_VERSION;
        family_attrmax = OVS_WIN_CONTROL_ATTR_MAX;
    } else if (!strcmp(name, OVS_DATAPATH_FAMILY)) {
        family_id = OVS_WIN_NL_DATAPATH_FAMILY_ID;
        family_name = OVS_DATAPATH_FAMILY;
        family_version = OVS_DATAPATH_VERSION;
        family_attrmax = OVS_DP_ATTR_MAX;
    } else if (!strcmp(name, OVS_PACKET_FAMILY)) {
        family_id = OVS_WIN_NL_PACKET_FAMILY_ID;
        family_name = OVS_PACKET_FAMILY;
        family_version = OVS_PACKET_VERSION;
        family_attrmax = OVS_PACKET_ATTR_MAX;
    } else if (!strcmp(name, OVS_VPORT_FAMILY)) {
        family_id = OVS_WIN_NL_VPORT_FAMILY_ID;
        family_name = OVS_VPORT_FAMILY;
        family_version = OVS_VPORT_VERSION;
        family_attrmax = OVS_VPORT_ATTR_MAX;
        mcgrp_id = OVS_WIN_NL_VPORT_MCGRP_ID;
        mcgrp_name = OVS_VPORT_MCGROUP;
    } else if (!strcmp(name, OVS_FLOW_FAMILY)) {
        family_id = OVS_WIN_NL_FLOW_FAMILY_ID;
        family_name = OVS_FLOW_FAMILY;
        family_version = OVS_FLOW_VERSION;
        family_attrmax = OVS_FLOW_ATTR_MAX;
    } else if (!strcmp(name, OVS_WIN_NETDEV_FAMILY)) {
        family_id = OVS_WIN_NL_NETDEV_FAMILY_ID;
        family_name = OVS_WIN_NETDEV_FAMILY;
        family_version = OVS_WIN_NETDEV_VERSION;
        family_attrmax = OVS_WIN_NETDEV_ATTR_MAX;
    } else {
        ofpbuf_delete(reply);
        return EINVAL;
    }

    nl_msg_put_genlmsghdr(reply, 0, GENL_ID_CTRL, 0,
                          CTRL_CMD_NEWFAMILY, family_version);
    /* CTRL_ATTR_HDRSIZE and CTRL_ATTR_OPS are not populated, but the
     * callers do not seem to need them. */
    nl_msg_put_u16(reply, CTRL_ATTR_FAMILY_ID, family_id);
    nl_msg_put_string(reply, CTRL_ATTR_FAMILY_NAME, family_name);
    nl_msg_put_u32(reply, CTRL_ATTR_VERSION, family_version);
    nl_msg_put_u32(reply, CTRL_ATTR_MAXATTR, family_attrmax);

    if (mcgrp_id != OVS_WIN_NL_INVALID_MCGRP_ID) {
        size_t mcgrp_ofs1 = nl_msg_start_nested(reply, CTRL_ATTR_MCAST_GROUPS);
        size_t mcgrp_ofs2 = nl_msg_start_nested(reply,
            OVS_WIN_NL_VPORT_MCGRP_ID - OVS_WIN_NL_MCGRP_START_ID);
        nl_msg_put_u32(reply, CTRL_ATTR_MCAST_GRP_ID, mcgrp_id);
        ovs_assert(mcgrp_name != NULL);
        nl_msg_put_string(reply, CTRL_ATTR_MCAST_GRP_NAME, mcgrp_name);
        nl_msg_end_nested(reply, mcgrp_ofs2);
        nl_msg_end_nested(reply, mcgrp_ofs1);
    }

    /* Set the total length of the netlink message. */
    nlmsg = nl_msg_nlmsghdr(reply);
    nlmsg->nlmsg_len = reply->size;

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        ofpbuf_delete(reply);
        return EPROTO;
    }

    *replyp = reply;
    return 0;
}
#endif

/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 */
int
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group)
{
    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;
    unsigned int left;
    int error;

    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);
    if (error) {
        return error;
    }

    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        error = EPROTO;
        goto exit;
    }

    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
        };

        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;

        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
            error = EPROTO;
            goto exit;
        }

        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
            *multicast_group =
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
            error = 0;
            goto exit;
        }
    }
    error = EPROTO;

exit:
    ofpbuf_delete(reply);
    return error;
}

/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'. If successful, returns 0 and the caller
 * may use '*number' as the family number. On failure, returns a positive
 * errno value and '*number' caches the errno value. */
int
nl_lookup_genl_family(const char *name, int *number)
{
    if (*number == 0) {
        struct nlattr *attrs[ARRAY_SIZE(family_policy)];
        struct ofpbuf *reply;
        int error;

        error = do_lookup_genl_family(name, attrs, &reply);
        if (!error) {
            *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
            define_genl_family(*number, name);
        } else {
            *number = -error;
        }
        ofpbuf_delete(reply);

        ovs_assert(*number != 0);
    }
    return *number > 0 ? 0 : -*number;
}
\f
struct nl_pool {
    struct nl_sock *socks[16];
    int n;
};

static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);

static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
{
    struct nl_sock *sock = NULL;
    struct nl_pool *pool;

    ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));

    ovs_mutex_lock(&pool_mutex);
    pool = &pools[protocol];
    if (pool->n > 0) {
        sock = pool->socks[--pool->n];
    }
    ovs_mutex_unlock(&pool_mutex);

    if (sock) {
        *sockp = sock;
        return 0;
    } else {
        return nl_sock_create(protocol, sockp);
    }
}

static void
nl_pool_release(struct nl_sock *sock)
{
    if (sock) {
        struct nl_pool *pool = &pools[sock->protocol];

        ovs_mutex_lock(&pool_mutex);
        if (pool->n < ARRAY_SIZE(pool->socks)) {
            pool->socks[pool->n++] = sock;
            sock = NULL;
        }
        ovs_mutex_unlock(&pool_mutex);

        nl_sock_destroy(sock);
    }
}

022ad2b9
BP
1651/* Sends 'request' to the kernel on a Netlink socket for the given 'protocol'
1652 * (e.g. NETLINK_ROUTE or NETLINK_GENERIC) and waits for a response. If
1653 * successful, returns 0. On failure, returns a positive errno value.
1654 *
1655 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
1656 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
1657 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
1658 * reply, if any, is discarded.
1659 *
1660 * Before the message is sent, nlmsg_len in 'request' will be finalized to
1661 * match the request's size, nlmsg_pid will be set to the pid of the socket used
1662 * for sending the request, and nlmsg_seq will be initialized.
1663 *
1664 * The caller is responsible for destroying 'request'.
1665 *
1666 * Bare Netlink is an unreliable transport protocol. This function layers
1667 * reliable delivery and reply semantics on top of bare Netlink.
1668 *
1669 * In Netlink, sending a request to the kernel is reliable enough, because the
1670 * kernel will tell us if the message cannot be queued (and we will in that
1671 * case put it on the transmit queue and wait until it can be delivered).
1672 *
1673 * Receiving the reply is the real problem: if the socket buffer is full when
1674 * the kernel tries to send the reply, the reply will be dropped. However, the
1675 * kernel sets a flag that a reply has been dropped. The next call to recv
1676 * then returns ENOBUFS. We can then re-send the request.
1677 *
1678 * Caveats:
1679 *
1680 * 1. Netlink depends on sequence numbers to match up requests and
1681 * replies. The sender of a request supplies a sequence number, and
1682 * the reply echoes back that sequence number.
1683 *
1684 * This is fine, but (1) some kernel netlink implementations are
1685 * broken, in that they fail to echo sequence numbers and (2) this
1686 * function will drop packets with non-matching sequence numbers, so
1687 * that only a single request can be usefully transacted at a time.
1688 *
1689 * 2. Resending the request causes it to be re-executed, so the request
1690 * needs to be idempotent.
1691 */
1692int
1693nl_transact(int protocol, const struct ofpbuf *request,
1694 struct ofpbuf **replyp)
1695{
1696 struct nl_sock *sock;
1697 int error;
1698
1699 error = nl_pool_alloc(protocol, &sock);
1700 if (error) {
1701 if (replyp) {
1702 *replyp = NULL;
1703 }
1704 return error;
1705 }
1706
1707 error = nl_sock_transact(sock, request, replyp);
1708
1709 nl_pool_release(sock);
1710 return error;
1711}
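/* Illustrative sketch (not part of the original file): one way a caller might
 * use nl_transact().  'family' is assumed to be a Generic Netlink family
 * number obtained earlier (e.g. with nl_lookup_genl_family()), and
 * EXAMPLE_CMD and EXAMPLE_VERSION stand in for a real command and version. */
#if 0
    struct ofpbuf *request = ofpbuf_new(0);
    struct ofpbuf *reply;
    int error;

    nl_msg_put_genlmsghdr(request, 0, family, NLM_F_REQUEST,
                          EXAMPLE_CMD, EXAMPLE_VERSION);
    error = nl_transact(NETLINK_GENERIC, request, &reply);
    ofpbuf_delete(request);
    if (!error) {
        /* ... parse the attributes in 'reply' ... */
        ofpbuf_delete(reply);
    }
#endif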
1712
1713/* Sends the 'request' member of the 'n' transactions in 'transactions' on a
1714 * Netlink socket for the given 'protocol' (e.g. NETLINK_ROUTE or
1715 * NETLINK_GENERIC), in order, and receives responses to all of them. Fills in
1716 * the 'error' member of each transaction with 0 if it was successful,
1717 * otherwise with a positive errno value. If 'reply' is nonnull, then it will
1718 * be filled with the reply if the message receives a detailed reply. In other
1719 * cases, i.e. where the request failed or had no reply beyond an indication of
1720 * success, 'reply' will be cleared if it is nonnull.
1721 *
1722 * The caller is responsible for destroying each request and reply, and the
1723 * transactions array itself.
1724 *
1725 * Before sending each message, this function will finalize nlmsg_len in each
1726 * 'request' to match the ofpbuf's size, set nlmsg_pid to the pid of the socket
1727 * used for the transaction, and initialize nlmsg_seq.
1728 *
1729 * Bare Netlink is an unreliable transport protocol. This function layers
1730 * reliable delivery and reply semantics on top of bare Netlink. See
1731 * nl_transact() for some caveats.
1732 */
1733void
1734nl_transact_multiple(int protocol,
1735 struct nl_transaction **transactions, size_t n)
1736{
1737 struct nl_sock *sock;
1738 int error;
1739
1740 error = nl_pool_alloc(protocol, &sock);
1741 if (!error) {
1742 nl_sock_transact_multiple(sock, transactions, n);
1743 nl_pool_release(sock);
1744 } else {
1745 nl_sock_record_errors__(transactions, n, error);
1746 }
1747}
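/* Illustrative sketch (not part of the original file): batching two requests
 * with nl_transact_multiple().  'request1' and 'request2' are assumed to be
 * ofpbufs already filled in by the caller; no detailed replies are requested
 * here, so only the per-transaction 'error' members are examined. */
#if 0
    struct nl_transaction txn1 = { .request = request1 };
    struct nl_transaction txn2 = { .request = request2 };
    struct nl_transaction *txns[] = { &txn1, &txn2 };

    nl_transact_multiple(NETLINK_GENERIC, txns, ARRAY_SIZE(txns));
    if (txn1.error || txn2.error) {
        /* One of the requests failed with a positive errno value. */
    }
#endif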
1748
1749\f
1750static uint32_t
1751nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
1752{
1753 uint32_t seq = sock->next_seq;
1754
1755 sock->next_seq += n;
1756
1757 /* Make it impossible for the next request for sequence numbers to wrap
1758 * around to 0. Start over with 1 to avoid ever using a sequence number of
1759 * 0, because the kernel uses sequence number 0 for notifications. */
1760 if (sock->next_seq >= UINT32_MAX / 2) {
1761 sock->next_seq = 1;
1762 }
1763
1764 return seq;
1765}
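/* Illustrative example (not part of the original file): if sock->next_seq is
 * 10, then nl_sock_allocate_seq(sock, 3) returns 10, reserving sequence
 * numbers 10, 11, and 12 for the caller, and leaves sock->next_seq at 13. */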
1766
1767static void
1768nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
1769{
1770 struct nlmsg_flag {
1771 unsigned int bits;
1772 const char *name;
1773 };
1774 static const struct nlmsg_flag flags[] = {
1775 { NLM_F_REQUEST, "REQUEST" },
1776 { NLM_F_MULTI, "MULTI" },
1777 { NLM_F_ACK, "ACK" },
1778 { NLM_F_ECHO, "ECHO" },
1779 { NLM_F_DUMP, "DUMP" },
1780 { NLM_F_ROOT, "ROOT" },
1781 { NLM_F_MATCH, "MATCH" },
1782 { NLM_F_ATOMIC, "ATOMIC" },
1783 };
1784 const struct nlmsg_flag *flag;
1785 uint16_t flags_left;
1786
1787 ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
1788 h->nlmsg_len, h->nlmsg_type);
1789 if (h->nlmsg_type == NLMSG_NOOP) {
1790 ds_put_cstr(ds, "(no-op)");
1791 } else if (h->nlmsg_type == NLMSG_ERROR) {
1792 ds_put_cstr(ds, "(error)");
1793 } else if (h->nlmsg_type == NLMSG_DONE) {
1794 ds_put_cstr(ds, "(done)");
1795 } else if (h->nlmsg_type == NLMSG_OVERRUN) {
1796 ds_put_cstr(ds, "(overrun)");
1797 } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
1798 ds_put_cstr(ds, "(reserved)");
1799 } else if (protocol == NETLINK_GENERIC) {
1800 ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
1801 } else {
1802 ds_put_cstr(ds, "(family-defined)");
1803 }
1804 ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
1805 flags_left = h->nlmsg_flags;
1806 for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
1807 if ((flags_left & flag->bits) == flag->bits) {
1808 ds_put_format(ds, "[%s]", flag->name);
1809 flags_left &= ~flag->bits;
1810 }
1811 }
1812 if (flags_left) {
1813 ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
1814 }
1815 ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
1816 h->nlmsg_seq, h->nlmsg_pid);
1817}
1818
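/* Formats the Netlink message in 'buffer' as a human-readable string for
 * logging, using 'protocol' to interpret the message type.  The caller must
 * free the returned string. */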
1819static char *
1820nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
1821{
1822 struct ds ds = DS_EMPTY_INITIALIZER;
1823 const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
1824 if (h) {
1825         nlmsghdr_to_string(h, protocol, &ds);
1826 if (h->nlmsg_type == NLMSG_ERROR) {
1827 const struct nlmsgerr *e;
1828 e = ofpbuf_at(buffer, NLMSG_HDRLEN,
1829 NLMSG_ALIGN(sizeof(struct nlmsgerr)));
1830 if (e) {
1831 ds_put_format(&ds, " error(%d", e->error);
1832 if (e->error < 0) {
1833                     ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
1834 }
1835 ds_put_cstr(&ds, ", in-reply-to(");
1836                 nlmsghdr_to_string(&e->msg, protocol, &ds);
1837 ds_put_cstr(&ds, "))");
1838 } else {
1839 ds_put_cstr(&ds, " error(truncated)");
1840 }
1841 } else if (h->nlmsg_type == NLMSG_DONE) {
1842 int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
1843 if (error) {
1844 ds_put_format(&ds, " done(%d", *error);
1845 if (*error < 0) {
1846                     ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
1847 }
1848 ds_put_cstr(&ds, ")");
1849 } else {
1850 ds_put_cstr(&ds, " done(truncated)");
1851 }
1852 } else if (protocol == NETLINK_GENERIC) {
1853 struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
1854 if (genl) {
1855 ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
1856 genl->cmd, genl->version);
1857 }
1858 }
1859 } else {
1860 ds_put_cstr(&ds, "nl(truncated)");
1861 }
1862 return ds.string;
1863}
1864
1865static void
1866log_nlmsg(const char *function, int error,
1867           const void *message, size_t size, int protocol)
1868{
1869 if (!VLOG_IS_DBG_ENABLED()) {
1870 return;
1871 }
1872
1873 struct ofpbuf buffer = ofpbuf_const_initializer(message, size);
1874 char *nlmsg = nlmsg_to_string(&buffer, protocol);
1875     VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
1876 free(nlmsg);
1877}