1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netlink-socket.h"
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <stdlib.h>
22 #include <sys/types.h>
23 #include <sys/uio.h>
24 #include <unistd.h>
25 #include "coverage.h"
26 #include "dynamic-string.h"
27 #include "hash.h"
28 #include "hmap.h"
29 #include "netlink.h"
30 #include "netlink-protocol.h"
31 #include "ofpbuf.h"
32 #include "ovs-thread.h"
33 #include "poll-loop.h"
34 #include "socket-util.h"
35 #include "util.h"
36 #include "vlog.h"
37
38 VLOG_DEFINE_THIS_MODULE(netlink_socket);
39
40 COVERAGE_DEFINE(netlink_overflow);
41 COVERAGE_DEFINE(netlink_received);
42 COVERAGE_DEFINE(netlink_recv_jumbo);
43 COVERAGE_DEFINE(netlink_sent);
44
45 /* Linux header file confusion causes this to be undefined. */
46 #ifndef SOL_NETLINK
47 #define SOL_NETLINK 270
48 #endif
49
50 /* A single (bad) Netlink message can in theory dump out many, many log
51 * messages, so the burst size is set quite high here to avoid missing useful
52 * information. Also, at high logging levels we log *all* Netlink messages. */
53 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);
54
55 static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
56 static void log_nlmsg(const char *function, int error,
57 const void *message, size_t size, int protocol);
58 \f
59 /* Netlink sockets. */
60
61 struct nl_sock {
62 int fd;
63 uint32_t next_seq;
64 uint32_t pid;
65 int protocol;
66 unsigned int rcvbuf; /* Receive buffer size (SO_RCVBUF). */
67 };
68
69 /* Compile-time limit on iovecs, so that we can allocate a maximum-size array
70 * of iovecs on the stack. */
71 #define MAX_IOVS 128
72
73 /* Maximum number of iovecs that may be passed to sendmsg, clamped to at
74 * least _XOPEN_IOV_MAX (16) and at most MAX_IOVS.
75 *
76 * Initialized by nl_sock_create(). */
77 static int max_iovs;
78
79 static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
80 static void nl_pool_release(struct nl_sock *);
81
82 /* Creates a new netlink socket for the given netlink 'protocol'
83 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
84 * new socket if successful, otherwise returns a positive errno value. */
85 int
86 nl_sock_create(int protocol, struct nl_sock **sockp)
87 {
88 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
89 struct nl_sock *sock;
90 struct sockaddr_nl local, remote;
91 socklen_t local_size;
92 int rcvbuf;
93 int retval = 0;
94
95 if (ovsthread_once_start(&once)) {
96 int save_errno = errno;
97 errno = 0;
98
99 max_iovs = sysconf(_SC_UIO_MAXIOV);
100 if (max_iovs < _XOPEN_IOV_MAX) {
101 if (max_iovs == -1 && errno) {
102 VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
103 }
104 max_iovs = _XOPEN_IOV_MAX;
105 } else if (max_iovs > MAX_IOVS) {
106 max_iovs = MAX_IOVS;
107 }
108
109 errno = save_errno;
110 ovsthread_once_done(&once);
111 }
112
113 *sockp = NULL;
114 sock = xmalloc(sizeof *sock);
115
116 sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
117 if (sock->fd < 0) {
118 VLOG_ERR("fcntl: %s", ovs_strerror(errno));
119 goto error;
120 }
121 sock->protocol = protocol;
122 sock->next_seq = 1;
123
124 rcvbuf = 1024 * 1024;
125 if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
126 &rcvbuf, sizeof rcvbuf)) {
127 /* Only root can use SO_RCVBUFFORCE. Everyone else gets EPERM.
128 * Warn only if the failure is therefore unexpected. */
129 if (errno != EPERM) {
130 VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
131 "(%s)", rcvbuf, ovs_strerror(errno));
132 }
133 }
134
135 retval = get_socket_rcvbuf(sock->fd);
136 if (retval < 0) {
137 retval = -retval;
138 goto error;
139 }
140 sock->rcvbuf = retval;
141
142 /* Connect to kernel (pid 0) as remote address. */
143 memset(&remote, 0, sizeof remote);
144 remote.nl_family = AF_NETLINK;
145 remote.nl_pid = 0;
146 if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
147 VLOG_ERR("connect(0): %s", ovs_strerror(errno));
148 goto error;
149 }
150
151 /* Obtain pid assigned by kernel. */
152 local_size = sizeof local;
153 if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
154 VLOG_ERR("getsockname: %s", ovs_strerror(errno));
155 goto error;
156 }
157 if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
158 VLOG_ERR("getsockname returned bad Netlink name");
159 retval = EINVAL;
160 goto error;
161 }
162 sock->pid = local.nl_pid;
163
164 *sockp = sock;
165 return 0;
166
167 error:
168 if (retval == 0) {
169 retval = errno;
170 if (retval == 0) {
171 retval = EINVAL;
172 }
173 }
174 if (sock->fd >= 0) {
175 close(sock->fd);
176 }
177 free(sock);
178 return retval;
179 }
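
/* A minimal usage sketch of the create/destroy lifecycle.  It only logs the
 * kernel-assigned pid; NETLINK_GENERIC is just one possible protocol.
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (error) {
 *         VLOG_ERR("could not create Netlink socket (%s)",
 *                  ovs_strerror(error));
 *     } else {
 *         VLOG_INFO("kernel assigned Netlink pid %"PRIu32, nl_sock_pid(sock));
 *         nl_sock_destroy(sock);
 *     }
 */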
180
181 /* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
182 * sets '*sockp' to the new socket if successful, otherwise returns a positive
183 * errno value. */
184 int
185 nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
186 {
187 return nl_sock_create(src->protocol, sockp);
188 }
189
190 /* Destroys netlink socket 'sock'. */
191 void
192 nl_sock_destroy(struct nl_sock *sock)
193 {
194 if (sock) {
195 close(sock->fd);
196 free(sock);
197 }
198 }
199
200 /* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
201 * successful, otherwise a positive errno value.
202 *
203 * A socket that is subscribed to a multicast group that receives asynchronous
204 * notifications must not be used for Netlink transactions or dumps, because
205 * transactions and dumps can cause notifications to be lost.
206 *
207 * Multicast group numbers are always positive.
208 *
209 * It is not an error to attempt to join a multicast group to which a socket
210 * already belongs. */
211 int
212 nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
213 {
214 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
215 &multicast_group, sizeof multicast_group) < 0) {
216 VLOG_WARN("could not join multicast group %u (%s)",
217 multicast_group, ovs_strerror(errno));
218 return errno;
219 }
220 return 0;
221 }
222
223 /* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
224 * successful, otherwise a positive errno value.
225 *
226 * Multicast group numbers are always positive.
227 *
228 * It is not an error to attempt to leave a multicast group to which a socket
229 * does not belong.
230 *
231 * On success, reading from 'sock' will still return any messages that were
232 * received on 'multicast_group' before the group was left. */
233 int
234 nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
235 {
236 if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
237 &multicast_group, sizeof multicast_group) < 0) {
238 VLOG_WARN("could not leave multicast group %u (%s)",
239 multicast_group, ovs_strerror(errno));
240 return errno;
241 }
242 return 0;
243 }
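
/* A usage sketch for multicast notifications: look up a Generic Netlink
 * multicast group, subscribe a dedicated socket to it, and block reading
 * notifications.  The family and group names are hypothetical placeholders,
 * and (per the comments above) such a socket is not also used for
 * transactions or dumps.
 *
 *     struct nl_sock *notify_sock;
 *     unsigned int mcgroup;
 *
 *     if (!nl_lookup_genl_mcgroup("hypothetical_family", "hypothetical_group",
 *                                 &mcgroup)
 *         && !nl_sock_create(NETLINK_GENERIC, &notify_sock)) {
 *         if (!nl_sock_join_mcgroup(notify_sock, mcgroup)) {
 *             uint64_t stub[4096 / 8];
 *             struct ofpbuf buf;
 *
 *             ofpbuf_use_stub(&buf, stub, sizeof stub);
 *             while (!nl_sock_recv(notify_sock, &buf, true)) {
 *                 VLOG_INFO("%"PRIu32"-byte notification received",
 *                           nl_msg_nlmsghdr(&buf)->nlmsg_len);
 *             }
 *             ofpbuf_uninit(&buf);
 *         }
 *         nl_sock_destroy(notify_sock);
 *     }
 */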
244
245 static int
246 nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
247 uint32_t nlmsg_seq, bool wait)
248 {
249 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
250 int error;
251
252 nlmsg->nlmsg_len = msg->size;
253 nlmsg->nlmsg_seq = nlmsg_seq;
254 nlmsg->nlmsg_pid = sock->pid;
255 do {
256 int retval;
257 retval = send(sock->fd, msg->data, msg->size, wait ? 0 : MSG_DONTWAIT);
258 error = retval < 0 ? errno : 0;
259 } while (error == EINTR);
260 log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
261 if (!error) {
262 COVERAGE_INC(netlink_sent);
263 }
264 return error;
265 }
266
267 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
268 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
269 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
270 * sequence number, before the message is sent.
271 *
272 * Returns 0 if successful, otherwise a positive errno value. If
273 * 'wait' is true, then the send will wait until buffer space is ready;
274 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
275 int
276 nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
277 {
278 return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
279 }
280
281 /* Tries to send 'msg', which must contain a Netlink message, to the kernel on
282 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
283 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
284 * 'nlmsg_seq', before the message is sent.
285 *
286 * Returns 0 if successful, otherwise a positive errno value. If
287 * 'wait' is true, then the send will wait until buffer space is ready;
288 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
289 *
290 * This function is suitable for sending a reply to a request that was received
291 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
292 int
293 nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
294 uint32_t nlmsg_seq, bool wait)
295 {
296 return nl_sock_send__(sock, msg, nlmsg_seq, wait);
297 }
298
299 static int
300 nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
301 {
302 /* We can't accurately predict the size of the data to be received. The
303 * caller is supposed to have allocated enough space in 'buf' to handle the
304 * "typical" case. To handle exceptions, we make available enough space in
305 * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
306 * figure since that's the maximum length of a Netlink attribute). */
307 struct nlmsghdr *nlmsghdr;
308 uint8_t tail[65536];
309 struct iovec iov[2];
310 struct msghdr msg;
311 ssize_t retval;
312
313 ovs_assert(buf->allocated >= sizeof *nlmsghdr);
314 ofpbuf_clear(buf);
315
316 iov[0].iov_base = buf->base;
317 iov[0].iov_len = buf->allocated;
318 iov[1].iov_base = tail;
319 iov[1].iov_len = sizeof tail;
320
321 memset(&msg, 0, sizeof msg);
322 msg.msg_iov = iov;
323 msg.msg_iovlen = 2;
324
325 do {
326 retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
327 } while (retval < 0 && errno == EINTR);
328
329 if (retval < 0) {
330 int error = errno;
331 if (error == ENOBUFS) {
332 /* Socket receive buffer overflow dropped one or more messages that
333 * the kernel tried to send to us. */
334 COVERAGE_INC(netlink_overflow);
335 }
336 return error;
337 }
338
339 if (msg.msg_flags & MSG_TRUNC) {
340 VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
341 sizeof tail);
342 return E2BIG;
343 }
344
345 nlmsghdr = buf->data;
346 if (retval < sizeof *nlmsghdr
347 || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
348 || nlmsghdr->nlmsg_len > retval) {
349 VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIuSIZE"d bytes < %"PRIuSIZE")",
350 retval, sizeof *nlmsghdr);
351 return EPROTO;
352 }
353
354 buf->size = MIN(retval, buf->allocated);
355 if (retval > buf->allocated) {
356 COVERAGE_INC(netlink_recv_jumbo);
357 ofpbuf_put(buf, tail, retval - buf->allocated);
358 }
359
360 log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
361 COVERAGE_INC(netlink_received);
362
363 return 0;
364 }
365
366 /* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
367 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
368 * EAGAIN if the 'sock' receive buffer is empty.
369 *
370 * The caller must have initialized 'buf' with an allocation of at least
371 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
372 * space for a "typical" message.
373 *
374 * On success, returns 0 and replaces 'buf''s previous content by the received
375 * message. This function expands 'buf''s allocated memory, as necessary, to
376 * hold the actual size of the received message.
377 *
378 * On failure, returns a positive errno value and clears 'buf' to zero length.
379 * 'buf' retains its previous memory allocation.
380 *
381 * Regardless of success or failure, this function resets 'buf''s headroom to
382 * 0. */
383 int
384 nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
385 {
386 return nl_sock_recv__(sock, buf, wait);
387 }
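
/* A sketch of the non-blocking receive pattern: with 'wait' false,
 * nl_sock_recv() returns EAGAIN once the receive queue is drained, after
 * which the caller asks the poll loop to wake it when the socket becomes
 * readable again.  'sock' is assumed to exist already.
 *
 *     uint64_t stub[4096 / 8];
 *     struct ofpbuf buf;
 *     int error;
 *
 *     ofpbuf_use_stub(&buf, stub, sizeof stub);
 *     while (!(error = nl_sock_recv(sock, &buf, false))) {
 *         VLOG_INFO("received a %"PRIuSIZE"-byte message", buf.size);
 *     }
 *     if (error != EAGAIN) {
 *         VLOG_WARN("receive error (%s)", ovs_strerror(error));
 *     }
 *     ofpbuf_uninit(&buf);
 *     nl_sock_wait(sock, POLLIN);
 */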
388
389 static void
390 nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
391 int error)
392 {
393 size_t i;
394
395 for (i = 0; i < n; i++) {
396 struct nl_transaction *txn = transactions[i];
397
398 txn->error = error;
399 if (txn->reply) {
400 ofpbuf_clear(txn->reply);
401 }
402 }
403 }
404
405 static int
406 nl_sock_transact_multiple__(struct nl_sock *sock,
407 struct nl_transaction **transactions, size_t n,
408 size_t *done)
409 {
410 uint64_t tmp_reply_stub[1024 / 8];
411 struct nl_transaction tmp_txn;
412 struct ofpbuf tmp_reply;
413
414 uint32_t base_seq;
415 struct iovec iovs[MAX_IOVS];
416 struct msghdr msg;
417 int error;
418 int i;
419
420 base_seq = nl_sock_allocate_seq(sock, n);
421 *done = 0;
422 for (i = 0; i < n; i++) {
423 struct nl_transaction *txn = transactions[i];
424 struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);
425
426 nlmsg->nlmsg_len = txn->request->size;
427 nlmsg->nlmsg_seq = base_seq + i;
428 nlmsg->nlmsg_pid = sock->pid;
429
430 iovs[i].iov_base = txn->request->data;
431 iovs[i].iov_len = txn->request->size;
432 }
433
434 memset(&msg, 0, sizeof msg);
435 msg.msg_iov = iovs;
436 msg.msg_iovlen = n;
437 do {
438 error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
439 } while (error == EINTR);
440
441 for (i = 0; i < n; i++) {
442 struct nl_transaction *txn = transactions[i];
443
444 log_nlmsg(__func__, error, txn->request->data, txn->request->size,
445 sock->protocol);
446 }
447 if (!error) {
448 COVERAGE_ADD(netlink_sent, n);
449 }
450
451 if (error) {
452 return error;
453 }
454
455 ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
456 tmp_txn.request = NULL;
457 tmp_txn.reply = &tmp_reply;
458 tmp_txn.error = 0;
459 while (n > 0) {
460 struct nl_transaction *buf_txn, *txn;
461 uint32_t seq;
462
463 /* Find a transaction whose buffer we can use for receiving a reply.
464 * If no such transaction is left, use tmp_txn. */
465 buf_txn = &tmp_txn;
466 for (i = 0; i < n; i++) {
467 if (transactions[i]->reply) {
468 buf_txn = transactions[i];
469 break;
470 }
471 }
472
473 /* Receive a reply. */
474 error = nl_sock_recv__(sock, buf_txn->reply, false);
475 if (error) {
476 if (error == EAGAIN) {
477 nl_sock_record_errors__(transactions, n, 0);
478 *done += n;
479 error = 0;
480 }
481 break;
482 }
483
484 /* Match the reply up with a transaction. */
485 seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
486 if (seq < base_seq || seq >= base_seq + n) {
487 VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
488 continue;
489 }
490 i = seq - base_seq;
491 txn = transactions[i];
492
493 /* Fill in the results for 'txn'. */
494 if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
495 if (txn->reply) {
496 ofpbuf_clear(txn->reply);
497 }
498 if (txn->error) {
499 VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
500 txn->error, ovs_strerror(txn->error));
501 }
502 } else {
503 txn->error = 0;
504 if (txn->reply && txn != buf_txn) {
505 /* Swap buffers. */
506 struct ofpbuf *reply = buf_txn->reply;
507 buf_txn->reply = txn->reply;
508 txn->reply = reply;
509 }
510 }
511
512 /* Fill in the results for transactions before 'txn'. (We have to do
513 * this after the results for 'txn' itself because of the buffer swap
514 * above.) */
515 nl_sock_record_errors__(transactions, i, 0);
516
517 /* Advance. */
518 *done += i + 1;
519 transactions += i + 1;
520 n -= i + 1;
521 base_seq += i + 1;
522 }
523 ofpbuf_uninit(&tmp_reply);
524
525 return error;
526 }
527
528 /* Sends the 'request' member of the 'n' transactions in 'transactions' on
529 * 'sock', in order, and receives responses to all of them. Fills in the
530 * 'error' member of each transaction with 0 if it was successful, otherwise
531 * with a positive errno value. If 'reply' is nonnull, then it will be filled
532 * with the reply if the message receives a detailed reply. In other cases,
533 * i.e. where the request failed or had no reply beyond an indication of
534 * success, 'reply' will be cleared if it is nonnull.
535 *
536 * The caller is responsible for destroying each request and reply, and the
537 * transactions array itself.
538 *
539 * Before sending each message, this function will finalize nlmsg_len in each
540 * 'request' to match the ofpbuf's size, set nlmsg_pid to 'sock''s pid, and
541 * initialize nlmsg_seq.
542 *
543 * Bare Netlink is an unreliable transport protocol. This function layers
544 * reliable delivery and reply semantics on top of bare Netlink. See
545 * nl_sock_transact() for some caveats.
546 */
547 void
548 nl_sock_transact_multiple(struct nl_sock *sock,
549 struct nl_transaction **transactions, size_t n)
550 {
551 int max_batch_count;
552 int error;
553
554 if (!n) {
555 return;
556 }
557
558 /* In theory, every request could have a 64 kB reply. But the default and
559 * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
560 * be a bit below 128 kB, so that would only allow a single message in a
561 * "batch". So we assume that replies average (at most) 4 kB, which allows
562 * a good deal of batching.
563 *
564 * In practice, most of the requests that we batch either have no reply at
565 * all or a brief reply. */
566 max_batch_count = MAX(sock->rcvbuf / 4096, 1);
567 max_batch_count = MIN(max_batch_count, max_iovs);
568
569 while (n > 0) {
570 size_t count, bytes;
571 size_t done;
572
573 /* Batch up to 'max_batch_count' transactions. But cap it at about a
574 * page of requests total because big skbuffs are expensive to
575 * allocate in the kernel. */
576 #if defined(PAGESIZE)
577 enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
578 #else
579 enum { MAX_BATCH_BYTES = 4096 - 512 };
580 #endif
581 bytes = transactions[0]->request->size;
582 for (count = 1; count < n && count < max_batch_count; count++) {
583 if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
584 break;
585 }
586 bytes += transactions[count]->request->size;
587 }
588
589 error = nl_sock_transact_multiple__(sock, transactions, count, &done);
590 transactions += done;
591 n -= done;
592
593 if (error == ENOBUFS) {
594 VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
595 } else if (error) {
596 VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
597 nl_sock_record_errors__(transactions, n, error);
598 }
599 }
600 }
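
/* A sketch of batching two transactions.  'sock', 'request_a', and
 * 'request_b' are assumed to exist; the requests are ofpbufs already built
 * with nl_msg_put_genlmsghdr() and friends.  Only the first transaction asks
 * for its reply to be kept.
 *
 *     struct nl_transaction txns[2];
 *     struct nl_transaction *txnsp[2];
 *     size_t i;
 *
 *     txns[0].request = request_a;
 *     txns[0].reply = ofpbuf_new(1024);
 *     txns[1].request = request_b;
 *     txns[1].reply = NULL;
 *     for (i = 0; i < 2; i++) {
 *         txnsp[i] = &txns[i];
 *     }
 *     nl_sock_transact_multiple(sock, txnsp, 2);
 *     for (i = 0; i < 2; i++) {
 *         if (txns[i].error) {
 *             VLOG_WARN("request %"PRIuSIZE" failed (%s)",
 *                       i, ovs_strerror(txns[i].error));
 *         }
 *     }
 *     ofpbuf_delete(txns[0].reply);
 */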
601
602 /* Sends 'request' to the kernel via 'sock' and waits for a response. If
603 * successful, returns 0. On failure, returns a positive errno value.
604 *
605 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
606 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
607 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
608 * reply, if any, is discarded.
609 *
610 * Before the message is sent, nlmsg_len in 'request' will be finalized to
611 * match msg->size, nlmsg_pid will be set to 'sock''s pid, and nlmsg_seq will
612 * be initialized, and NLM_F_ACK will be set in nlmsg_flags.
613 *
614 * The caller is responsible for destroying 'request'.
615 *
616 * Bare Netlink is an unreliable transport protocol. This function layers
617 * reliable delivery and reply semantics on top of bare Netlink.
618 *
619 * In Netlink, sending a request to the kernel is reliable enough, because the
620 * kernel will tell us if the message cannot be queued (and we will in that
621 * case put it on the transmit queue and wait until it can be delivered).
622 *
623 * Receiving the reply is the real problem: if the socket buffer is full when
624 * the kernel tries to send the reply, the reply will be dropped. However, the
625 * kernel sets a flag that a reply has been dropped. The next call to recv
626 * then returns ENOBUFS. We can then re-send the request.
627 *
628 * Caveats:
629 *
630 * 1. Netlink depends on sequence numbers to match up requests and
631 * replies. The sender of a request supplies a sequence number, and
632 * the reply echoes back that sequence number.
633 *
634 * This is fine, but (1) some kernel netlink implementations are
635 * broken, in that they fail to echo sequence numbers and (2) this
636 * function will drop packets with non-matching sequence numbers, so
637 * that only a single request can be usefully transacted at a time.
638 *
639 * 2. Resending the request causes it to be re-executed, so the request
640 * needs to be idempotent.
641 */
642 int
643 nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
644 struct ofpbuf **replyp)
645 {
646 struct nl_transaction *transactionp;
647 struct nl_transaction transaction;
648
649 transaction.request = CONST_CAST(struct ofpbuf *, request);
650 transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
651 transactionp = &transaction;
652
653 nl_sock_transact_multiple(sock, &transactionp, 1);
654
655 if (replyp) {
656 if (transaction.error) {
657 ofpbuf_delete(transaction.reply);
658 *replyp = NULL;
659 } else {
660 *replyp = transaction.reply;
661 }
662 }
663
664 return transaction.error;
665 }
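
/* A sketch of a single request/reply transaction, along the same lines as
 * do_lookup_genl_family() below: it asks the Generic Netlink controller about
 * a family ("ovs_datapath" here is only an example name) and discards the
 * reply after logging its size.
 *
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "ovs_datapath");
 *     error = nl_sock_transact(sock, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         VLOG_INFO("received a %"PRIuSIZE"-byte reply", reply->size);
 *         ofpbuf_delete(reply);
 *     }
 */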
666
667 /* Drain all the messages currently in 'sock''s receive queue. */
668 int
669 nl_sock_drain(struct nl_sock *sock)
670 {
671 return drain_rcvbuf(sock->fd);
672 }
673
674 /* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
675 * Netlink socket created with the given 'protocol', and initializes 'dump' to
676 * reflect the state of the operation.
677 *
678 * nlmsg_len in 'request' will be finalized to match its size, and nlmsg_pid will
679 * be set to the Netlink socket's pid, before the message is sent. NLM_F_DUMP
680 * and NLM_F_ACK will be set in nlmsg_flags.
681 *
682 * The design of this Netlink socket library ensures that the dump is reliable.
683 *
684 * This function provides no status indication. An error status for the entire
685 * dump operation is provided when it is completed by calling nl_dump_done().
686 *
687 * The caller is responsible for destroying 'request'.
688 */
689 void
690 nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
691 {
692 ofpbuf_init(&dump->buffer, 4096);
693 dump->status = nl_pool_alloc(protocol, &dump->sock);
694 if (dump->status) {
695 return;
696 }
697
698 nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
699 dump->status = nl_sock_send__(dump->sock, request,
700 nl_sock_allocate_seq(dump->sock, 1), true);
701 dump->seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
702 }
703
704 /* Helper function for nl_dump_next(). */
705 static int
706 nl_dump_recv(struct nl_dump *dump)
707 {
708 struct nlmsghdr *nlmsghdr;
709 int retval;
710
711 retval = nl_sock_recv__(dump->sock, &dump->buffer, true);
712 if (retval) {
713 return retval == EINTR ? EAGAIN : retval;
714 }
715
716 nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
717 if (dump->seq != nlmsghdr->nlmsg_seq) {
718 VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
719 nlmsghdr->nlmsg_seq, dump->seq);
720 return EAGAIN;
721 }
722
723 if (nl_msg_nlmsgerr(&dump->buffer, &retval)) {
724 VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
725 ovs_strerror(retval));
726 return retval && retval != EAGAIN ? retval : EPROTO;
727 }
728
729 return 0;
730 }
731
732 /* Attempts to retrieve another reply from 'dump', which must have been
733 * initialized with nl_dump_start().
734 *
735 * If successful, returns true and points 'reply->data' and 'reply->size' to
736 * the message that was retrieved. The caller must not modify 'reply' (because
737 * it points into the middle of a larger buffer).
738 *
739 * On failure, returns false and sets 'reply->data' to NULL and 'reply->size'
740 * to 0. Failure might indicate an actual error or merely the end of replies.
741 * An error status for the entire dump operation is provided when it is
742 * completed by calling nl_dump_done().
743 */
744 bool
745 nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply)
746 {
747 struct nlmsghdr *nlmsghdr;
748
749 reply->data = NULL;
750 reply->size = 0;
751 if (dump->status) {
752 return false;
753 }
754
755 while (!dump->buffer.size) {
756 int retval = nl_dump_recv(dump);
757 if (retval) {
758 ofpbuf_clear(&dump->buffer);
759 if (retval != EAGAIN) {
760 dump->status = retval;
761 return false;
762 }
763 }
764 }
765
766 nlmsghdr = nl_msg_next(&dump->buffer, reply);
767 if (!nlmsghdr) {
768 VLOG_WARN_RL(&rl, "netlink dump reply contains message fragment");
769 dump->status = EPROTO;
770 return false;
771 } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
772 dump->status = EOF;
773 return false;
774 }
775
776 return true;
777 }
778
779 /* Completes Netlink dump operation 'dump', which must have been initialized
780 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
781 * otherwise a positive errno value describing the problem. */
782 int
783 nl_dump_done(struct nl_dump *dump)
784 {
785 /* Drain any remaining messages that the client didn't read. Otherwise the
786 * kernel will continue to queue them up and waste buffer space.
787 *
788 * XXX We could just destroy and discard the socket in this case. */
789 while (!dump->status) {
790 struct ofpbuf reply;
791 if (!nl_dump_next(dump, &reply)) {
792 ovs_assert(dump->status);
793 }
794 }
795 nl_pool_release(dump->sock);
796 ofpbuf_uninit(&dump->buffer);
797 return dump->status == EOF ? 0 : dump->status;
798 }
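
/* A sketch of a complete dump.  'request' is assumed to be an already-built
 * Netlink request appropriate for a dump; the loop only logs the size of each
 * record, and the overall status is checked once at the end.
 *
 *     struct nl_dump dump;
 *     struct ofpbuf reply;
 *     int error;
 *
 *     nl_dump_start(&dump, NETLINK_GENERIC, request);
 *     while (nl_dump_next(&dump, &reply)) {
 *         VLOG_INFO("dump record of %"PRIuSIZE" bytes", reply.size);
 *     }
 *     error = nl_dump_done(&dump);
 *     if (error) {
 *         VLOG_WARN("dump failed (%s)", ovs_strerror(error));
 *     }
 */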
799
800 /* Causes poll_block() to wake up when any of the specified 'events' (which is
801 * an OR'd combination of POLLIN, POLLOUT, etc.) occurs on 'sock'. */
802 void
803 nl_sock_wait(const struct nl_sock *sock, short int events)
804 {
805 poll_fd_wait(sock->fd, events);
806 }
807
808 /* Returns the underlying fd for 'sock', for use in "poll()"-like operations
809 * that can't use nl_sock_wait().
810 *
811 * It's a little tricky to use the returned fd correctly, because nl_sock does
812 * "copy on write" to allow a single nl_sock to be used for notifications,
813 * transactions, and dumps. If 'sock' is used only for notifications and
814 * transactions (and never for dump) then the usage is safe. */
815 int
816 nl_sock_fd(const struct nl_sock *sock)
817 {
818 return sock->fd;
819 }
820
821 /* Returns the PID associated with this socket. */
822 uint32_t
823 nl_sock_pid(const struct nl_sock *sock)
824 {
825 return sock->pid;
826 }
827 \f
828 /* Miscellaneous. */
829
830 struct genl_family {
831 struct hmap_node hmap_node;
832 uint16_t id;
833 char *name;
834 };
835
836 static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);
837
838 static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
839 [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
840 [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
841 };
842
843 static struct genl_family *
844 find_genl_family_by_id(uint16_t id)
845 {
846 struct genl_family *family;
847
848 HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
849 &genl_families) {
850 if (family->id == id) {
851 return family;
852 }
853 }
854 return NULL;
855 }
856
857 static void
858 define_genl_family(uint16_t id, const char *name)
859 {
860 struct genl_family *family = find_genl_family_by_id(id);
861
862 if (family) {
863 if (!strcmp(family->name, name)) {
864 return;
865 }
866 free(family->name);
867 } else {
868 family = xmalloc(sizeof *family);
869 family->id = id;
870 hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
871 }
872 family->name = xstrdup(name);
873 }
874
875 static const char *
876 genl_family_to_name(uint16_t id)
877 {
878 if (id == GENL_ID_CTRL) {
879 return "control";
880 } else {
881 struct genl_family *family = find_genl_family_by_id(id);
882 return family ? family->name : "unknown";
883 }
884 }
885
886 static int
887 do_lookup_genl_family(const char *name, struct nlattr **attrs,
888 struct ofpbuf **replyp)
889 {
890 struct nl_sock *sock;
891 struct ofpbuf request, *reply;
892 int error;
893
894 *replyp = NULL;
895 error = nl_sock_create(NETLINK_GENERIC, &sock);
896 if (error) {
897 return error;
898 }
899
900 ofpbuf_init(&request, 0);
901 nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
902 CTRL_CMD_GETFAMILY, 1);
903 nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
904 error = nl_sock_transact(sock, &request, &reply);
905 ofpbuf_uninit(&request);
906 if (error) {
907 nl_sock_destroy(sock);
908 return error;
909 }
910
911 if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
912 family_policy, attrs, ARRAY_SIZE(family_policy))
913 || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
914 nl_sock_destroy(sock);
915 ofpbuf_delete(reply);
916 return EPROTO;
917 }
918
919 nl_sock_destroy(sock);
920 *replyp = reply;
921 return 0;
922 }
923
924 /* Finds the multicast group called 'group_name' in genl family 'family_name'.
925 * When successful, writes its result to 'multicast_group' and returns 0.
926 * Otherwise, clears 'multicast_group' and returns a positive error code.
927 */
928 int
929 nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
930 unsigned int *multicast_group)
931 {
932 struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
933 const struct nlattr *mc;
934 struct ofpbuf *reply;
935 unsigned int left;
936 int error;
937
938 *multicast_group = 0;
939 error = do_lookup_genl_family(family_name, family_attrs, &reply);
940 if (error) {
941 return error;
942 }
943
944 if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
945 error = EPROTO;
946 goto exit;
947 }
948
949 NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
950 static const struct nl_policy mc_policy[] = {
951 [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
952 [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
953 };
954
955 struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
956 const char *mc_name;
957
958 if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
959 error = EPROTO;
960 goto exit;
961 }
962
963 mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
964 if (!strcmp(group_name, mc_name)) {
965 *multicast_group =
966 nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
967 error = 0;
968 goto exit;
969 }
970 }
971 error = EPROTO;
972
973 exit:
974 ofpbuf_delete(reply);
975 return error;
976 }
977
978 /* If '*number' is 0, translates the given Generic Netlink family 'name' to a
979 * number and stores it in '*number'. If successful, returns 0 and the caller
980 * may use '*number' as the family number. On failure, returns a positive
981 * errno value and '*number' caches the negated errno value.
982 int
983 nl_lookup_genl_family(const char *name, int *number)
984 {
985 if (*number == 0) {
986 struct nlattr *attrs[ARRAY_SIZE(family_policy)];
987 struct ofpbuf *reply;
988 int error;
989
990 error = do_lookup_genl_family(name, attrs, &reply);
991 if (!error) {
992 *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
993 define_genl_family(*number, name);
994 } else {
995 *number = -error;
996 }
997 ofpbuf_delete(reply);
998
999 ovs_assert(*number != 0);
1000 }
1001 return *number > 0 ? 0 : -*number;
1002 }
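
/* A sketch of the usual caching pattern for nl_lookup_genl_family(): a static
 * holds 0 until the first lookup, then either the family number or a negated
 * errno value, so the kernel is consulted at most once.  The family name is a
 * hypothetical placeholder.
 *
 *     static int hypothetical_family = 0;
 *     int error = nl_lookup_genl_family("hypothetical_family",
 *                                       &hypothetical_family);
 *     if (error) {
 *         VLOG_WARN("Generic Netlink family not available (%s)",
 *                   ovs_strerror(error));
 *     }
 */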
1003 \f
1004 struct nl_pool {
1005 struct nl_sock *socks[16];
1006 int n;
1007 };
1008
1009 static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
1010 static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
1011
1012 static int
1013 nl_pool_alloc(int protocol, struct nl_sock **sockp)
1014 {
1015 struct nl_sock *sock = NULL;
1016 struct nl_pool *pool;
1017
1018 ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
1019
1020 ovs_mutex_lock(&pool_mutex);
1021 pool = &pools[protocol];
1022 if (pool->n > 0) {
1023 sock = pool->socks[--pool->n];
1024 }
1025 ovs_mutex_unlock(&pool_mutex);
1026
1027 if (sock) {
1028 *sockp = sock;
1029 return 0;
1030 } else {
1031 return nl_sock_create(protocol, sockp);
1032 }
1033 }
1034
1035 static void
1036 nl_pool_release(struct nl_sock *sock)
1037 {
1038 if (sock) {
1039 struct nl_pool *pool = &pools[sock->protocol];
1040
1041 ovs_mutex_lock(&pool_mutex);
1042 if (pool->n < ARRAY_SIZE(pool->socks)) {
1043 pool->socks[pool->n++] = sock;
1044 sock = NULL;
1045 }
1046 ovs_mutex_unlock(&pool_mutex);
1047
1048 nl_sock_destroy(sock);
1049 }
1050 }
1051
1052 int
1053 nl_transact(int protocol, const struct ofpbuf *request,
1054 struct ofpbuf **replyp)
1055 {
1056 struct nl_sock *sock;
1057 int error;
1058
1059 error = nl_pool_alloc(protocol, &sock);
1060 if (error) {
1061 *replyp = NULL;
1062 return error;
1063 }
1064
1065 error = nl_sock_transact(sock, request, replyp);
1066
1067 nl_pool_release(sock);
1068 return error;
1069 }
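
/* A sketch of the pool-backed convenience wrapper: a one-off transaction that
 * does not need a caller-managed nl_sock.  'request' is assumed to be an
 * already-built Netlink request.
 *
 *     struct ofpbuf *reply;
 *     int error = nl_transact(NETLINK_GENERIC, request, &reply);
 *     if (error) {
 *         VLOG_WARN("transaction failed (%s)", ovs_strerror(error));
 *     } else {
 *         ofpbuf_delete(reply);
 *     }
 */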
1070
1071 void
1072 nl_transact_multiple(int protocol,
1073 struct nl_transaction **transactions, size_t n)
1074 {
1075 struct nl_sock *sock;
1076 int error;
1077
1078 error = nl_pool_alloc(protocol, &sock);
1079 if (!error) {
1080 nl_sock_transact_multiple(sock, transactions, n);
1081 nl_pool_release(sock);
1082 } else {
1083 nl_sock_record_errors__(transactions, n, error);
1084 }
1085 }
1086
1087 \f
1088 static uint32_t
1089 nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
1090 {
1091 uint32_t seq = sock->next_seq;
1092
1093 sock->next_seq += n;
1094
1095 /* Make it impossible for the next request for sequence numbers to wrap
1096 * around to 0. Start over with 1 to avoid ever using a sequence number of
1097 * 0, because the kernel uses sequence number 0 for notifications. */
1098 if (sock->next_seq >= UINT32_MAX / 2) {
1099 sock->next_seq = 1;
1100 }
1101
1102 return seq;
1103 }
1104
1105 static void
1106 nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
1107 {
1108 struct nlmsg_flag {
1109 unsigned int bits;
1110 const char *name;
1111 };
1112 static const struct nlmsg_flag flags[] = {
1113 { NLM_F_REQUEST, "REQUEST" },
1114 { NLM_F_MULTI, "MULTI" },
1115 { NLM_F_ACK, "ACK" },
1116 { NLM_F_ECHO, "ECHO" },
1117 { NLM_F_DUMP, "DUMP" },
1118 { NLM_F_ROOT, "ROOT" },
1119 { NLM_F_MATCH, "MATCH" },
1120 { NLM_F_ATOMIC, "ATOMIC" },
1121 };
1122 const struct nlmsg_flag *flag;
1123 uint16_t flags_left;
1124
1125 ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
1126 h->nlmsg_len, h->nlmsg_type);
1127 if (h->nlmsg_type == NLMSG_NOOP) {
1128 ds_put_cstr(ds, "(no-op)");
1129 } else if (h->nlmsg_type == NLMSG_ERROR) {
1130 ds_put_cstr(ds, "(error)");
1131 } else if (h->nlmsg_type == NLMSG_DONE) {
1132 ds_put_cstr(ds, "(done)");
1133 } else if (h->nlmsg_type == NLMSG_OVERRUN) {
1134 ds_put_cstr(ds, "(overrun)");
1135 } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
1136 ds_put_cstr(ds, "(reserved)");
1137 } else if (protocol == NETLINK_GENERIC) {
1138 ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
1139 } else {
1140 ds_put_cstr(ds, "(family-defined)");
1141 }
1142 ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
1143 flags_left = h->nlmsg_flags;
1144 for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
1145 if ((flags_left & flag->bits) == flag->bits) {
1146 ds_put_format(ds, "[%s]", flag->name);
1147 flags_left &= ~flag->bits;
1148 }
1149 }
1150 if (flags_left) {
1151 ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
1152 }
1153 ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
1154 h->nlmsg_seq, h->nlmsg_pid);
1155 }
1156
1157 static char *
1158 nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
1159 {
1160 struct ds ds = DS_EMPTY_INITIALIZER;
1161 const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
1162 if (h) {
1163 nlmsghdr_to_string(h, protocol, &ds);
1164 if (h->nlmsg_type == NLMSG_ERROR) {
1165 const struct nlmsgerr *e;
1166 e = ofpbuf_at(buffer, NLMSG_HDRLEN,
1167 NLMSG_ALIGN(sizeof(struct nlmsgerr)));
1168 if (e) {
1169 ds_put_format(&ds, " error(%d", e->error);
1170 if (e->error < 0) {
1171 ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
1172 }
1173 ds_put_cstr(&ds, ", in-reply-to(");
1174 nlmsghdr_to_string(&e->msg, protocol, &ds);
1175 ds_put_cstr(&ds, "))");
1176 } else {
1177 ds_put_cstr(&ds, " error(truncated)");
1178 }
1179 } else if (h->nlmsg_type == NLMSG_DONE) {
1180 int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
1181 if (error) {
1182 ds_put_format(&ds, " done(%d", *error);
1183 if (*error < 0) {
1184 ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
1185 }
1186 ds_put_cstr(&ds, ")");
1187 } else {
1188 ds_put_cstr(&ds, " done(truncated)");
1189 }
1190 } else if (protocol == NETLINK_GENERIC) {
1191 struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
1192 if (genl) {
1193 ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
1194 genl->cmd, genl->version);
1195 }
1196 }
1197 } else {
1198 ds_put_cstr(&ds, "nl(truncated)");
1199 }
1200 return ds.string;
1201 }
1202
1203 static void
1204 log_nlmsg(const char *function, int error,
1205 const void *message, size_t size, int protocol)
1206 {
1207 struct ofpbuf buffer;
1208 char *nlmsg;
1209
1210 if (!VLOG_IS_DBG_ENABLED()) {
1211 return;
1212 }
1213
1214 ofpbuf_use_const(&buffer, message, size);
1215 nlmsg = nlmsg_to_string(&buffer, protocol);
1216 VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
1217 free(nlmsg);
1218 }