/* ceph/src/spdk/lib/rte_vhost/socket.c */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/queue.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>

#include <rte_log.h>

#include "fd_man.h"
#include "vhost.h"
#include "vhost_user.h"

TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
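
/*
 * TAILQ_HEAD() comes from <sys/queue.h> and declares the list-head type used
 * for the per-socket connection list below; it expands to roughly:
 *
 *     struct vhost_user_connection_list {
 *         struct vhost_user_connection *tqh_first;
 *         struct vhost_user_connection **tqh_last;
 *     };
 */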

/*
 * Every time rte_vhost_driver_register() is invoked, an associated
 * vhost_user_socket struct will be created.
 */
struct vhost_user_socket {
	struct vhost_user_connection_list conn_list;
	pthread_mutex_t conn_mutex;
	char *path;
	int socket_fd;
	struct sockaddr_un un;
	bool is_server;
	bool reconnect;
	bool dequeue_zero_copy;

	/*
	 * The "supported_features" indicates the feature bits the
	 * vhost driver supports. The "features" indicates the feature
	 * bits after rte_vhost_driver_disable/enable_features().
	 * It is also the final feature bits used for vhost-user
	 * features negotiation.
	 */
	uint64_t supported_features;
	uint64_t features;

	struct vhost_device_ops const *notify_ops;
};

struct vhost_user_connection {
	struct vhost_user_socket *vsocket;
	int connfd;
	int vid;

	TAILQ_ENTRY(vhost_user_connection) next;
};

#define MAX_VHOST_SOCKET 1024
struct vhost_user {
	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
	struct fdset fdset;
	int vsocket_cnt;
	pthread_mutex_t mutex;
};

#define MAX_VIRTIO_BACKLOG 128

static void vhost_user_server_new_connection(int fd, void *data, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
static int create_unix_socket(struct vhost_user_socket *vsocket);
static int vhost_user_start_client(struct vhost_user_socket *vsocket);

static struct vhost_user vhost_user = {
	.fdset = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
		.num = 0
	},
	.vsocket_cnt = 0,
	.mutex = PTHREAD_MUTEX_INITIALIZER,
};
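
/*
 * Note: "[0 ... MAX_FDS - 1]" is a GCC/Clang designated-range initializer
 * (not standard C); it marks every slot in the fdset as unused (fd == -1)
 * before any socket is registered.
 */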

/* Return the number of bytes read on success, or a negative value on failure. */
int
read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	ret = recvmsg(sockfd, &msgh, 0);
	if (ret <= 0) {
		if (ret)
			RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed, %s\n", strerror(errno));
		else
			RTE_LOG(INFO, VHOST_CONFIG, "peer closed\n");
		return ret;
	}

	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
		RTE_LOG(ERR, VHOST_CONFIG, "truncated msg\n");
		return -1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(fds, CMSG_DATA(cmsg), fdsize);
			break;
		}
	}

	return ret;
}

int
send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;

	if (fds && fd_num > 0) {
		msgh.msg_control = control;
		msgh.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msgh);
		if (cmsg == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n");
			errno = EINVAL;
			return -1;
		}
		cmsg->cmsg_len = CMSG_LEN(fdsize);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		memcpy(CMSG_DATA(cmsg), fds, fdsize);
	} else {
		msgh.msg_control = NULL;
		msgh.msg_controllen = 0;
	}

	do {
		ret = sendmsg(sockfd, &msgh, 0);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");
		return ret;
	}

	return ret;
}
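
/*
 * Illustrative use of the two helpers above (a sketch, not code from this
 * file; connfd, msg, len and fd_to_pass are hypothetical names): a vhost-user
 * message that carries one descriptor could be exchanged like
 *
 *     int fd_to_pass = some_fd;
 *     send_fd_message(connfd, (char *)&msg, len, &fd_to_pass, 1);
 *
 *     int fds[1];
 *     read_fd_message(connfd, (char *)&msg, len, fds, 1);
 *
 * where the descriptor travels in the SCM_RIGHTS ancillary data built above.
 */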

static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
	int vid;
	size_t size;
	struct vhost_user_connection *conn;
	int ret;

	conn = malloc(sizeof(*conn));
	if (conn == NULL) {
		close(fd);
		return;
	}

	vid = vhost_new_device(vsocket->features, vsocket->notify_ops);
	if (vid == -1) {
		goto err;
	}

	size = strnlen(vsocket->path, PATH_MAX);
	vhost_set_ifname(vid, vsocket->path, size);

	if (vsocket->dequeue_zero_copy)
		vhost_enable_dequeue_zero_copy(vid);

	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);

	if (vsocket->notify_ops->new_connection) {
		ret = vsocket->notify_ops->new_connection(vid);
		if (ret < 0) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to add vhost user connection with fd %d\n",
				fd);
			goto err;
		}
	}

	conn->connfd = fd;
	conn->vsocket = vsocket;
	conn->vid = vid;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
			NULL, conn);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add fd %d into vhost server fdset\n",
			fd);

		if (vsocket->notify_ops->destroy_connection)
			vsocket->notify_ops->destroy_connection(conn->vid);

		goto err;
	}

	pthread_mutex_lock(&vsocket->conn_mutex);
	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
	pthread_mutex_unlock(&vsocket->conn_mutex);
	return;

err:
	free(conn);
	close(fd);
}

/* Callback invoked when there is a new vhost-user connection from a client. */
static void
vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
{
	struct vhost_user_socket *vsocket = dat;

	fd = accept(fd, NULL, NULL);
	if (fd < 0)
		return;

	RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
	vhost_user_add_connection(fd, vsocket);
}

static void
vhost_user_read_cb(int connfd, void *dat, int *remove)
{
	struct vhost_user_connection *conn = dat;
	struct vhost_user_socket *vsocket = conn->vsocket;
	int ret;

	ret = vhost_user_msg_handler(conn->vid, connfd);
	if (ret < 0) {
		*remove = 1;
		vhost_destroy_device(conn->vid);

		if (vsocket->notify_ops->destroy_connection)
			vsocket->notify_ops->destroy_connection(conn->vid);

		pthread_mutex_lock(&vsocket->conn_mutex);
		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
		if (conn->connfd != -1) {
			close(conn->connfd);
			conn->connfd = -1;
		}
		pthread_mutex_unlock(&vsocket->conn_mutex);

		free(conn);

		if (vsocket->reconnect) {
			create_unix_socket(vsocket);
			vhost_user_start_client(vsocket);
		}
	}
}
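
/*
 * Note on the "remove" out-parameter used above: setting *remove to 1 asks
 * the fdset dispatch loop (see fdset_event_dispatch() in fd_man.c) to drop
 * this descriptor from the fdset once the callback returns, so a broken
 * connection is not polled again. This describes how the callback is wired
 * up here, not a guarantee about other users of fd_man.
 */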

static int
create_unix_socket(struct vhost_user_socket *vsocket)
{
	int fd;
	struct sockaddr_un *un = &vsocket->un;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
		vsocket->is_server ? "server" : "client", fd);

	if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost-user: can't set nonblocking mode for socket, fd: "
			"%d (%s)\n", fd, strerror(errno));
		close(fd);
		return -1;
	}

	memset(un, 0, sizeof(*un));
	un->sun_family = AF_UNIX;
	strncpy(un->sun_path, vsocket->path, sizeof(un->sun_path));
	un->sun_path[sizeof(un->sun_path) - 1] = '\0';

	vsocket->socket_fd = fd;
	return 0;
}
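
/*
 * create_unix_socket() only creates the descriptor and caches the
 * sockaddr_un in vsocket->un; binding (server mode) or connecting (client
 * mode) is done later by vhost_user_start_server() / vhost_user_start_client(),
 * which reuse that cached address.
 */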

static int
vhost_user_start_server(struct vhost_user_socket *vsocket)
{
	int ret;
	int fd = vsocket->socket_fd;
	const char *path = vsocket->path;

	ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to bind to %s: %s; remove it and try again\n",
			path, strerror(errno));
		goto err;
	}
	RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);

	ret = listen(fd, MAX_VIRTIO_BACKLOG);
	if (ret < 0)
		goto err;

	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
			NULL, vsocket);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add listen fd %d to vhost server fdset\n",
			fd);
		goto err;
	}

	return 0;

err:
	close(fd);
	return -1;
}

struct vhost_user_reconnect {
	struct sockaddr_un un;
	int fd;
	struct vhost_user_socket *vsocket;

	TAILQ_ENTRY(vhost_user_reconnect) next;
};

TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
	struct vhost_user_reconnect_tailq_list head;
	pthread_mutex_t mutex;
};

static struct vhost_user_reconnect_list reconn_list;
static pthread_t reconn_tid;

static int
vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
{
	int ret, flags;

	ret = connect(fd, un, sz);
	if (ret < 0 && errno != EISCONN)
		return -1;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't get flags for connfd %d\n", fd);
		return -2;
	}
	if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't disable nonblocking on fd %d\n", fd);
		return -2;
	}
	return 0;
}
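
/*
 * Return convention of the helper above: 0 means the socket is connected,
 * -1 means the connect attempt did not succeed (the caller may retry later),
 * and -2 means an unrecoverable fcntl() failure while clearing O_NONBLOCK.
 */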

static void *
vhost_user_client_reconnect(void *arg __rte_unused)
{
	int ret;
	struct vhost_user_reconnect *reconn, *next;

	while (1) {
		pthread_mutex_lock(&reconn_list.mutex);

		/*
		 * An equivalent of TAILQ_FOREACH_SAFE, which is not
		 * available on all platforms.
		 */
		for (reconn = TAILQ_FIRST(&reconn_list.head);
		     reconn != NULL; reconn = next) {
			next = TAILQ_NEXT(reconn, next);

			ret = vhost_user_connect_nonblock(reconn->fd,
					(struct sockaddr *)&reconn->un,
					sizeof(reconn->un));
			if (ret == -2) {
				close(reconn->fd);
				RTE_LOG(ERR, VHOST_CONFIG,
					"reconnection for fd %d failed\n",
					reconn->fd);
				goto remove_fd;
			}
			if (ret == -1)
				continue;

			RTE_LOG(INFO, VHOST_CONFIG,
				"%s: connected\n", reconn->vsocket->path);
			vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			free(reconn);
		}

		pthread_mutex_unlock(&reconn_list.mutex);
		sleep(1);
	}

	return NULL;
}

static int
vhost_user_reconnect_init(void)
{
	int ret;

	pthread_mutex_init(&reconn_list.mutex, NULL);
	TAILQ_INIT(&reconn_list.head);

	ret = pthread_create(&reconn_tid, NULL,
			     vhost_user_client_reconnect, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to create reconnect thread\n");
		return -1;
	}

	return 0;
}

static int
vhost_user_start_client(struct vhost_user_socket *vsocket)
{
	int ret;
	int fd = vsocket->socket_fd;
	const char *path = vsocket->path;
	struct vhost_user_reconnect *reconn;

	ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&vsocket->un,
					  sizeof(vsocket->un));
	if (ret == 0) {
		vhost_user_add_connection(fd, vsocket);
		return 0;
	}

	RTE_LOG(WARNING, VHOST_CONFIG,
		"failed to connect to %s: %s\n",
		path, strerror(errno));

	if (ret == -2 || !vsocket->reconnect) {
		close(fd);
		return -1;
	}

	RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
	reconn = malloc(sizeof(*reconn));
	if (reconn == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for reconnect\n");
		close(fd);
		return -1;
	}
	reconn->un = vsocket->un;
	reconn->fd = fd;
	reconn->vsocket = vsocket;
	pthread_mutex_lock(&reconn_list.mutex);
	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
	pthread_mutex_unlock(&reconn_list.mutex);

	return 0;
}
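
/*
 * If the first connect attempt above fails with a retryable error and
 * reconnect is enabled, the descriptor is parked on reconn_list and the
 * reconnect thread retries it roughly once per second until it succeeds.
 */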

static struct vhost_user_socket *
find_vhost_user_socket(const char *path)
{
	int i;

	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

		if (!strcmp(vsocket->path, path))
			return vsocket;
	}

	return NULL;
}

int
rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		vsocket->features &= ~features;
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

int
rte_vhost_driver_enable_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket) {
		if ((vsocket->supported_features & features) != features) {
			/*
			 * trying to enable features the driver doesn't
			 * support.
			 */
			pthread_mutex_unlock(&vhost_user.mutex);
			return -1;
		}
		vsocket->features |= features;
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

int
rte_vhost_driver_set_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket) {
		vsocket->supported_features = features;
		vsocket->features = features;
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}
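
/*
 * How the three feature calls above interact (illustrative sketch, not code
 * from this file):
 *
 *     rte_vhost_driver_set_features(path, f);       // supported = features = f
 *     rte_vhost_driver_disable_features(path, bit); // clears bit in features
 *     rte_vhost_driver_enable_features(path, bit);  // only succeeds if bit is
 *                                                   // still in supported_features
 */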

int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		*features = vsocket->features;
	pthread_mutex_unlock(&vhost_user.mutex);

	if (!vsocket) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
		return -1;
	} else {
		return 0;
	}
}

/*
 * Register a new vhost-user socket; here we act as a server (the default
 * case), or as a client when the RTE_VHOST_USER_CLIENT flag is set.
 */
int
rte_vhost_driver_register(const char *path, uint64_t flags)
{
	int ret = -1;
	struct vhost_user_socket *vsocket;

	if (!path)
		return -1;

	pthread_mutex_lock(&vhost_user.mutex);

	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: the number of vhost sockets reaches maximum\n");
		goto out;
	}

	vsocket = malloc(sizeof(struct vhost_user_socket));
	if (!vsocket)
		goto out;
	memset(vsocket, 0, sizeof(struct vhost_user_socket));
	vsocket->path = strdup(path);
	if (!vsocket->path) {
		free(vsocket);
		goto out;
	}
	TAILQ_INIT(&vsocket->conn_list);
	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

	/*
	 * Set the supported features correctly for the builtin vhost-user
	 * net driver.
	 *
	 * Applications know nothing about the features the builtin virtio-net
	 * driver (virtio_net.c) supports, so they cannot be expected to invoke
	 * rte_vhost_driver_set_features() themselves. To work around that, we
	 * set the net driver's features here unconditionally. An application
	 * that implements another vhost-user driver (say, SCSI) should call
	 * rte_vhost_driver_set_features(), which will overwrite the following
	 * two values.
	 */
	vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
	vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;

	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
		if (vsocket->reconnect && reconn_tid == 0) {
			if (vhost_user_reconnect_init() < 0) {
				free(vsocket->path);
				free(vsocket);
				goto out;
			}
		}
	} else {
		vsocket->is_server = true;
	}
	ret = create_unix_socket(vsocket);
	if (ret < 0) {
		free(vsocket->path);
		free(vsocket);
		goto out;
	}

	pthread_mutex_init(&vsocket->conn_mutex, NULL);
	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;

out:
	pthread_mutex_unlock(&vhost_user.mutex);

	return ret;
}
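
/*
 * Typical application-side use of the registration API in this file (an
 * illustrative sketch with a hypothetical socket path; "ops" is a
 * struct vhost_device_ops supplied by the application):
 *
 *     if (rte_vhost_driver_register("/tmp/vhost.sock", 0) < 0)
 *             return -1;
 *     rte_vhost_driver_callback_register("/tmp/vhost.sock", &ops);
 *     if (rte_vhost_driver_start("/tmp/vhost.sock") < 0)
 *             return -1;
 */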

static bool
vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
{
	int found = false;
	struct vhost_user_reconnect *reconn, *next;

	pthread_mutex_lock(&reconn_list.mutex);

	for (reconn = TAILQ_FIRST(&reconn_list.head);
	     reconn != NULL; reconn = next) {
		next = TAILQ_NEXT(reconn, next);

		if (reconn->vsocket == vsocket) {
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			close(reconn->fd);
			free(reconn);
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&reconn_list.mutex);
	return found;
}

/**
 * Unregister the specified vhost socket
 */
int
rte_vhost_driver_unregister(const char *path)
{
	int i;
	int count;
	struct vhost_user_connection *conn;

	pthread_mutex_lock(&vhost_user.mutex);

	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

		if (!strcmp(vsocket->path, path)) {
			if (vsocket->is_server) {
				fdset_del(&vhost_user.fdset, vsocket->socket_fd);
				close(vsocket->socket_fd);
				unlink(path);
			} else if (vsocket->reconnect) {
				vhost_user_remove_reconnect(vsocket);
			}

			pthread_mutex_lock(&vsocket->conn_mutex);
			TAILQ_FOREACH(conn, &vsocket->conn_list, next) {
				close(conn->connfd);
				conn->connfd = -1;
			}
			pthread_mutex_unlock(&vsocket->conn_mutex);

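			/*
			 * Wait for the fdset thread to tear the connections
			 * down: vhost_user_read_cb() sees the closed fds,
			 * removes each conn from conn_list and frees it, so
			 * this loop spins until the list drains.
			 */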
			do {
				pthread_mutex_lock(&vsocket->conn_mutex);
				conn = TAILQ_FIRST(&vsocket->conn_list);
				pthread_mutex_unlock(&vsocket->conn_mutex);
			} while (conn != NULL);

			free(vsocket->path);
			free(vsocket);

			count = --vhost_user.vsocket_cnt;
			vhost_user.vsockets[i] = vhost_user.vsockets[count];
			vhost_user.vsockets[count] = NULL;
			pthread_mutex_unlock(&vhost_user.mutex);

			return 0;
		}
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return -1;
}

/*
 * Register ops so that we can add/remove devices to/from the data core.
 */
int
rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		vsocket->notify_ops = ops;
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

struct vhost_device_ops const *
vhost_driver_callback_get(const char *path)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? vsocket->notify_ops : NULL;
}

int
rte_vhost_driver_start(const char *path)
{
	struct vhost_user_socket *vsocket;
	static pthread_t fdset_tid;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	pthread_mutex_unlock(&vhost_user.mutex);

	if (!vsocket)
		return -1;

	if (fdset_tid == 0) {
		rte_cpuset_t orig_cpuset;
		rte_cpuset_t tmp_cpuset;
		long num_cores, i;
		int ret;

		CPU_ZERO(&tmp_cpuset);
		num_cores = sysconf(_SC_NPROCESSORS_CONF);
		/* Create a mask containing all CPUs */
		for (i = 0; i < num_cores; i++) {
			CPU_SET(i, &tmp_cpuset);
		}

		rte_thread_get_affinity(&orig_cpuset);
		rte_thread_set_affinity(&tmp_cpuset);
		ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
				     &vhost_user.fdset);
		rte_thread_set_affinity(&orig_cpuset);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to create fdset handling thread\n");
	}

	if (vsocket->is_server)
		return vhost_user_start_server(vsocket);
	else
		return vhost_user_start_client(vsocket);
}