4 * Copyright(c) Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk/stdinc.h"
37 #include "spdk/likely.h"
38 #include "spdk/string.h"
39 #include "spdk/util.h"
40 #include "spdk/barrier.h"
42 #include "spdk/vhost.h"
43 #include "vhost_internal.h"
45 static uint32_t *g_num_ctrlrs
;
47 /* Path to folder where character device will be created. Can be set by user. */
48 static char dev_dirname
[PATH_MAX
] = "";
50 struct spdk_vhost_dev_event_ctx
{
51 /** Pointer to the controller obtained before enqueuing the event */
52 struct spdk_vhost_dev
*vdev
;
54 /** ID of the vdev to send event to. */
57 /** User callback function to be executed on given lcore. */
58 spdk_vhost_event_fn cb_fn
;
60 /** Semaphore used to signal that event is done. */
63 /** Response to be written by enqueued event. */
67 static int new_connection(int vid
);
68 static int start_device(int vid
);
69 static void stop_device(int vid
);
70 static void destroy_connection(int vid
);
71 static int get_config(int vid
, uint8_t *config
, uint32_t len
);
72 static int set_config(int vid
, uint8_t *config
, uint32_t offset
,
73 uint32_t size
, uint32_t flags
);
75 const struct vhost_device_ops g_spdk_vhost_ops
= {
76 .new_device
= start_device
,
77 .destroy_device
= stop_device
,
78 .get_config
= get_config
,
79 .set_config
= set_config
,
80 .new_connection
= new_connection
,
81 .destroy_connection
= destroy_connection
,
82 .vhost_nvme_admin_passthrough
= spdk_vhost_nvme_admin_passthrough
,
83 .vhost_nvme_set_cq_call
= spdk_vhost_nvme_set_cq_call
,
84 .vhost_nvme_get_cap
= spdk_vhost_nvme_get_cap
,
87 static TAILQ_HEAD(, spdk_vhost_dev
) g_spdk_vhost_devices
= TAILQ_HEAD_INITIALIZER(
88 g_spdk_vhost_devices
);
89 static pthread_mutex_t g_spdk_vhost_mutex
= PTHREAD_MUTEX_INITIALIZER
;
91 void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev
*vdev
, uint64_t addr
, uint64_t len
)
97 vva
= (void *)rte_vhost_va_from_guest_pa(vdev
->mem
, addr
, &newlen
);
107 spdk_vhost_log_req_desc(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
,
110 struct vring_desc
*desc
, *desc_table
;
111 uint32_t desc_table_size
;
114 if (spdk_likely(!spdk_vhost_dev_has_feature(vdev
, VHOST_F_LOG_ALL
))) {
118 rc
= spdk_vhost_vq_get_desc(vdev
, virtqueue
, req_id
, &desc
, &desc_table
, &desc_table_size
);
119 if (spdk_unlikely(rc
!= 0)) {
120 SPDK_ERRLOG("Can't log used ring descriptors!\n");
125 if (spdk_vhost_vring_desc_is_wr(desc
)) {
126 /* To be honest, only pages realy touched should be logged, but
127 * doing so would require tracking those changes in each backed.
128 * Also backend most likely will touch all/most of those pages so
129 * for lets assume we touched all pages passed to as writeable buffers. */
130 rte_vhost_log_write(vdev
->vid
, desc
->addr
, desc
->len
);
132 spdk_vhost_vring_desc_get_next(&desc
, desc_table
, desc_table_size
);
137 spdk_vhost_log_used_vring_elem(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
,
140 uint64_t offset
, len
;
143 if (spdk_likely(!spdk_vhost_dev_has_feature(vdev
, VHOST_F_LOG_ALL
))) {
147 offset
= offsetof(struct vring_used
, ring
[idx
]);
148 len
= sizeof(virtqueue
->vring
.used
->ring
[idx
]);
149 vq_idx
= virtqueue
- vdev
->virtqueue
;
151 rte_vhost_log_used_vring(vdev
->vid
, vq_idx
, offset
, len
);
155 spdk_vhost_log_used_vring_idx(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
)
157 uint64_t offset
, len
;
160 if (spdk_likely(!spdk_vhost_dev_has_feature(vdev
, VHOST_F_LOG_ALL
))) {
164 offset
= offsetof(struct vring_used
, idx
);
165 len
= sizeof(virtqueue
->vring
.used
->idx
);
166 vq_idx
= virtqueue
- vdev
->virtqueue
;
168 rte_vhost_log_used_vring(vdev
->vid
, vq_idx
, offset
, len
);
172 * Get available requests from avail ring.
175 spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue
*virtqueue
, uint16_t *reqs
,
178 struct rte_vhost_vring
*vring
= &virtqueue
->vring
;
179 struct vring_avail
*avail
= vring
->avail
;
180 uint16_t size_mask
= vring
->size
- 1;
181 uint16_t last_idx
= vring
->last_avail_idx
, avail_idx
= avail
->idx
;
184 count
= avail_idx
- last_idx
;
185 if (spdk_likely(count
== 0)) {
189 if (spdk_unlikely(count
> vring
->size
)) {
190 /* TODO: the queue is unrecoverably broken and should be marked so.
191 * For now we will fail silently and report there are no new avail entries.
196 count
= spdk_min(count
, reqs_len
);
197 vring
->last_avail_idx
+= count
;
198 for (i
= 0; i
< count
; i
++) {
199 reqs
[i
] = vring
->avail
->ring
[(last_idx
+ i
) & size_mask
];
202 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING
,
203 "AVAIL: last_idx=%"PRIu16
" avail_idx=%"PRIu16
" count=%"PRIu16
"\n",
204 last_idx
, avail_idx
, count
);
210 spdk_vhost_vring_desc_is_indirect(struct vring_desc
*cur_desc
)
212 return !!(cur_desc
->flags
& VRING_DESC_F_INDIRECT
);
216 spdk_vhost_vq_get_desc(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
,
217 uint16_t req_idx
, struct vring_desc
**desc
, struct vring_desc
**desc_table
,
218 uint32_t *desc_table_size
)
220 if (spdk_unlikely(req_idx
>= virtqueue
->vring
.size
)) {
224 *desc
= &virtqueue
->vring
.desc
[req_idx
];
226 if (spdk_vhost_vring_desc_is_indirect(*desc
)) {
227 assert(spdk_vhost_dev_has_feature(vdev
, VIRTIO_RING_F_INDIRECT_DESC
));
228 *desc_table_size
= (*desc
)->len
/ sizeof(**desc
);
229 *desc_table
= spdk_vhost_gpa_to_vva(vdev
, (*desc
)->addr
,
230 sizeof(**desc
) * *desc_table_size
);
239 *desc_table
= virtqueue
->vring
.desc
;
240 *desc_table_size
= virtqueue
->vring
.size
;
246 spdk_vhost_vq_used_signal(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
)
248 if (virtqueue
->used_req_cnt
== 0) {
252 virtqueue
->req_cnt
+= virtqueue
->used_req_cnt
;
253 virtqueue
->used_req_cnt
= 0;
255 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING
,
256 "Queue %td - USED RING: sending IRQ: last used %"PRIu16
"\n",
257 virtqueue
- vdev
->virtqueue
, virtqueue
->vring
.last_used_idx
);
259 eventfd_write(virtqueue
->vring
.callfd
, (eventfd_t
)1);
265 check_dev_io_stats(struct spdk_vhost_dev
*vdev
, uint64_t now
)
267 struct spdk_vhost_virtqueue
*virtqueue
;
268 uint32_t irq_delay_base
= vdev
->coalescing_delay_time_base
;
269 uint32_t io_threshold
= vdev
->coalescing_io_rate_threshold
;
274 if (now
< vdev
->next_stats_check_time
) {
278 vdev
->next_stats_check_time
= now
+ vdev
->stats_check_interval
;
279 for (q_idx
= 0; q_idx
< vdev
->max_queues
; q_idx
++) {
280 virtqueue
= &vdev
->virtqueue
[q_idx
];
282 req_cnt
= virtqueue
->req_cnt
+ virtqueue
->used_req_cnt
;
283 if (req_cnt
<= io_threshold
) {
287 irq_delay
= (irq_delay_base
* (req_cnt
- io_threshold
)) / io_threshold
;
288 virtqueue
->irq_delay_time
= (uint32_t) spdk_max(0, irq_delay
);
290 virtqueue
->req_cnt
= 0;
291 virtqueue
->next_event_time
= now
;
296 spdk_vhost_dev_used_signal(struct spdk_vhost_dev
*vdev
)
298 struct spdk_vhost_virtqueue
*virtqueue
;
302 if (vdev
->coalescing_delay_time_base
== 0) {
303 for (q_idx
= 0; q_idx
< vdev
->max_queues
; q_idx
++) {
304 virtqueue
= &vdev
->virtqueue
[q_idx
];
306 if (virtqueue
->vring
.desc
== NULL
||
307 (virtqueue
->vring
.avail
->flags
& VRING_AVAIL_F_NO_INTERRUPT
)) {
311 spdk_vhost_vq_used_signal(vdev
, virtqueue
);
314 now
= spdk_get_ticks();
315 check_dev_io_stats(vdev
, now
);
317 for (q_idx
= 0; q_idx
< vdev
->max_queues
; q_idx
++) {
318 virtqueue
= &vdev
->virtqueue
[q_idx
];
320 /* No need for event right now */
321 if (now
< virtqueue
->next_event_time
||
322 (virtqueue
->vring
.avail
->flags
& VRING_AVAIL_F_NO_INTERRUPT
)) {
326 if (!spdk_vhost_vq_used_signal(vdev
, virtqueue
)) {
330 /* Syscall is quite long so update time */
331 now
= spdk_get_ticks();
332 virtqueue
->next_event_time
= now
+ virtqueue
->irq_delay_time
;
338 spdk_vhost_set_coalescing(struct spdk_vhost_dev
*vdev
, uint32_t delay_base_us
,
339 uint32_t iops_threshold
)
341 uint64_t delay_time_base
= delay_base_us
* spdk_get_ticks_hz() / 1000000ULL;
342 uint32_t io_rate
= iops_threshold
* SPDK_VHOST_DEV_STATS_CHECK_INTERVAL_MS
/ 1000U;
344 if (delay_time_base
>= UINT32_MAX
) {
345 SPDK_ERRLOG("Delay time of %"PRIu32
" is to big\n", delay_base_us
);
347 } else if (io_rate
== 0) {
348 SPDK_ERRLOG("IOPS rate of %"PRIu32
" is too low. Min is %u\n", io_rate
,
349 1000U / SPDK_VHOST_DEV_STATS_CHECK_INTERVAL_MS
);
353 vdev
->coalescing_delay_time_base
= delay_time_base
;
354 vdev
->coalescing_io_rate_threshold
= io_rate
;
356 vdev
->coalescing_delay_us
= delay_base_us
;
357 vdev
->coalescing_iops_threshold
= iops_threshold
;
362 spdk_vhost_get_coalescing(struct spdk_vhost_dev
*vdev
, uint32_t *delay_base_us
,
363 uint32_t *iops_threshold
)
366 *delay_base_us
= vdev
->coalescing_delay_us
;
369 if (iops_threshold
) {
370 *iops_threshold
= vdev
->coalescing_iops_threshold
;
375 * Enqueue id and len to used ring.
378 spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev
*vdev
, struct spdk_vhost_virtqueue
*virtqueue
,
379 uint16_t id
, uint32_t len
)
381 struct rte_vhost_vring
*vring
= &virtqueue
->vring
;
382 struct vring_used
*used
= vring
->used
;
383 uint16_t last_idx
= vring
->last_used_idx
& (vring
->size
- 1);
385 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING
,
386 "Queue %td - USED RING: last_idx=%"PRIu16
" req id=%"PRIu16
" len=%"PRIu32
"\n",
387 virtqueue
- vdev
->virtqueue
, vring
->last_used_idx
, id
, len
);
389 spdk_vhost_log_req_desc(vdev
, virtqueue
, id
);
391 vring
->last_used_idx
++;
392 used
->ring
[last_idx
].id
= id
;
393 used
->ring
[last_idx
].len
= len
;
395 /* Ensure the used ring is updated before we log it or increment used->idx. */
398 spdk_vhost_log_used_vring_elem(vdev
, virtqueue
, last_idx
);
399 * (volatile uint16_t *) &used
->idx
= vring
->last_used_idx
;
400 spdk_vhost_log_used_vring_idx(vdev
, virtqueue
);
402 /* Ensure all our used ring changes are visible to the guest at the time
404 * TODO: this is currently an sfence on x86. For other architectures we
405 * will most likely need an smp_mb(), but smp_mb() is an overkill for x86.
409 virtqueue
->used_req_cnt
++;
413 spdk_vhost_vring_desc_get_next(struct vring_desc
**desc
,
414 struct vring_desc
*desc_table
, uint32_t desc_table_size
)
416 struct vring_desc
*old_desc
= *desc
;
419 if ((old_desc
->flags
& VRING_DESC_F_NEXT
) == 0) {
424 next_idx
= old_desc
->next
;
425 if (spdk_unlikely(next_idx
>= desc_table_size
)) {
430 *desc
= &desc_table
[next_idx
];
435 spdk_vhost_vring_desc_is_wr(struct vring_desc
*cur_desc
)
437 return !!(cur_desc
->flags
& VRING_DESC_F_WRITE
);
/* Byte offset of ptr within its 2 MB hugepage. */
#define _2MB_OFFSET(ptr)	((ptr) & (0x200000 - 1))
443 spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev
*vdev
, struct iovec
*iov
,
444 uint16_t *iov_index
, const struct vring_desc
*desc
)
446 uint32_t remaining
= desc
->len
;
447 uint32_t to_boundary
;
449 uintptr_t payload
= desc
->addr
;
453 if (*iov_index
>= SPDK_VHOST_IOVS_MAX
) {
454 SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX
);
457 vva
= (uintptr_t)rte_vhost_gpa_to_vva(vdev
->mem
, payload
);
459 SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload
);
462 to_boundary
= 0x200000 - _2MB_OFFSET(payload
);
463 if (spdk_likely(remaining
<= to_boundary
)) {
467 * Descriptor crosses a 2MB hugepage boundary. vhost memory regions are allocated
468 * from hugepage memory, so this means this descriptor may be described by
469 * discontiguous vhost memory regions. Do not blindly split on the 2MB boundary,
470 * only split it if the two sides of the boundary do not map to the same vhost
471 * memory region. This helps ensure we do not exceed the max number of IOVs
472 * defined by SPDK_VHOST_IOVS_MAX.
475 while (len
< remaining
) {
476 if (vva
+ len
!= (uintptr_t)rte_vhost_gpa_to_vva(vdev
->mem
, payload
+ len
)) {
479 len
+= spdk_min(remaining
- len
, 0x200000);
482 iov
[*iov_index
].iov_base
= (void *)vva
;
483 iov
[*iov_index
].iov_len
= len
;
492 static struct spdk_vhost_dev
*
493 spdk_vhost_dev_find_by_id(unsigned id
)
495 struct spdk_vhost_dev
*vdev
;
497 TAILQ_FOREACH(vdev
, &g_spdk_vhost_devices
, tailq
) {
498 if (vdev
->id
== id
) {
506 static struct spdk_vhost_dev
*
507 spdk_vhost_dev_find_by_vid(int vid
)
509 struct spdk_vhost_dev
*vdev
;
511 TAILQ_FOREACH(vdev
, &g_spdk_vhost_devices
, tailq
) {
512 if (vdev
->vid
== vid
) {
/* 2 MB hugepage alignment helpers. SHIFT_2MB (= log2 of 2 MB) is defined in
 * an included SPDK header. The full expansion of each macro is parenthesized
 * so the macros compose safely inside larger expressions: without the outer
 * parentheses, FLOOR_2MB(x) + 1 would expand to a shift by SHIFT_2MB + 1
 * because '+' binds tighter than '<<'. */
#define SIZE_2MB (1ULL << SHIFT_2MB)
#define FLOOR_2MB(x) ((((uintptr_t)x) / SIZE_2MB) << SHIFT_2MB)
#define CEIL_2MB(x) (((((uintptr_t)x) + SIZE_2MB - 1) / SIZE_2MB) << SHIFT_2MB)
526 spdk_vhost_dev_mem_register(struct spdk_vhost_dev
*vdev
)
528 struct rte_vhost_mem_region
*region
;
531 for (i
= 0; i
< vdev
->mem
->nregions
; i
++) {
532 uint64_t start
, end
, len
;
533 region
= &vdev
->mem
->regions
[i
];
534 start
= FLOOR_2MB(region
->mmap_addr
);
535 end
= CEIL_2MB(region
->mmap_addr
+ region
->mmap_size
);
537 SPDK_INFOLOG(SPDK_LOG_VHOST
, "Registering VM memory for vtophys translation - 0x%jx len:0x%jx\n",
540 if (spdk_mem_register((void *)start
, len
) != 0) {
541 SPDK_WARNLOG("Failed to register memory region %"PRIu32
". Future vtophys translation might fail.\n",
549 spdk_vhost_dev_mem_unregister(struct spdk_vhost_dev
*vdev
)
551 struct rte_vhost_mem_region
*region
;
554 for (i
= 0; i
< vdev
->mem
->nregions
; i
++) {
555 uint64_t start
, end
, len
;
556 region
= &vdev
->mem
->regions
[i
];
557 start
= FLOOR_2MB(region
->mmap_addr
);
558 end
= CEIL_2MB(region
->mmap_addr
+ region
->mmap_size
);
561 if (spdk_vtophys((void *) start
) == SPDK_VTOPHYS_ERROR
) {
562 continue; /* region has not been registered */
565 if (spdk_mem_unregister((void *)start
, len
) != 0) {
573 spdk_vhost_free_reactor(uint32_t lcore
)
575 g_num_ctrlrs
[lcore
]--;
578 struct spdk_vhost_dev
*
579 spdk_vhost_dev_find(const char *ctrlr_name
)
581 struct spdk_vhost_dev
*vdev
;
582 size_t dev_dirname_len
= strlen(dev_dirname
);
584 if (strncmp(ctrlr_name
, dev_dirname
, dev_dirname_len
) == 0) {
585 ctrlr_name
+= dev_dirname_len
;
588 TAILQ_FOREACH(vdev
, &g_spdk_vhost_devices
, tailq
) {
589 if (strcmp(vdev
->name
, ctrlr_name
) == 0) {
598 spdk_vhost_parse_core_mask(const char *mask
, struct spdk_cpuset
*cpumask
)
602 if (cpumask
== NULL
) {
607 spdk_cpuset_copy(cpumask
, spdk_app_get_core_mask());
611 rc
= spdk_app_parse_core_mask(mask
, cpumask
);
613 SPDK_ERRLOG("invalid cpumask %s\n", mask
);
617 if (spdk_cpuset_count(cpumask
) == 0) {
618 SPDK_ERRLOG("no cpu is selected among reactor mask(=%s)\n",
619 spdk_cpuset_fmt(spdk_app_get_core_mask()));
627 _start_rte_driver(void *arg
)
631 if (rte_vhost_driver_start(path
) != 0) {
639 spdk_vhost_dev_register(struct spdk_vhost_dev
*vdev
, const char *name
, const char *mask_str
,
640 const struct spdk_vhost_dev_backend
*backend
)
642 static unsigned ctrlr_num
;
644 struct stat file_stat
;
645 struct spdk_cpuset
*cpumask
;
650 /* We expect devices inside g_spdk_vhost_devices to be sorted in ascending
651 * order in regard of vdev->id. For now we always set vdev->id = ctrlr_num++
652 * and append each vdev to the very end of g_spdk_vhost_devices list.
653 * This is required for foreach vhost events to work.
655 if (ctrlr_num
== UINT_MAX
) {
661 SPDK_ERRLOG("Can't register controller with no name\n");
665 cpumask
= spdk_cpuset_alloc();
667 SPDK_ERRLOG("spdk_cpuset_alloc failed\n");
671 if (spdk_vhost_parse_core_mask(mask_str
, cpumask
) != 0) {
672 SPDK_ERRLOG("cpumask %s is invalid (app mask is 0x%s)\n",
673 mask_str
, spdk_cpuset_fmt(spdk_app_get_core_mask()));
678 if (spdk_vhost_dev_find(name
)) {
679 SPDK_ERRLOG("vhost controller %s already exists.\n", name
);
684 if (snprintf(path
, sizeof(path
), "%s%s", dev_dirname
, name
) >= (int)sizeof(path
)) {
685 SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n", name
, dev_dirname
,
691 /* Register vhost driver to handle vhost messages. */
692 if (stat(path
, &file_stat
) != -1) {
693 if (!S_ISSOCK(file_stat
.st_mode
)) {
694 SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
695 "The file already exists and is not a socket.\n",
699 } else if (unlink(path
) != 0) {
700 SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
701 "The socket already exists and failed to unlink.\n",
708 if (rte_vhost_driver_register(path
, 0) != 0) {
709 SPDK_ERRLOG("Could not register controller %s with vhost library\n", name
);
710 SPDK_ERRLOG("Check if domain socket %s already exists\n", path
);
714 if (rte_vhost_driver_set_features(path
, backend
->virtio_features
) ||
715 rte_vhost_driver_disable_features(path
, backend
->disabled_features
)) {
716 SPDK_ERRLOG("Couldn't set vhost features for controller %s\n", name
);
718 rte_vhost_driver_unregister(path
);
723 if (rte_vhost_driver_callback_register(path
, &g_spdk_vhost_ops
) != 0) {
724 rte_vhost_driver_unregister(path
);
725 SPDK_ERRLOG("Couldn't register callbacks for controller %s\n", name
);
730 /* The following might start a POSIX thread that polls for incoming
731 * socket connections and calls backend->start/stop_device. These backend
732 * callbacks are also protected by the global SPDK vhost mutex, so we're
733 * safe with not initializing the vdev just yet.
735 if (spdk_call_unaffinitized(_start_rte_driver
, path
) == NULL
) {
736 SPDK_ERRLOG("Failed to start vhost driver for controller %s (%d): %s\n",
737 name
, errno
, spdk_strerror(errno
));
738 rte_vhost_driver_unregister(path
);
743 vdev
->name
= strdup(name
);
744 vdev
->path
= strdup(path
);
745 vdev
->id
= ctrlr_num
++;
748 vdev
->cpumask
= cpumask
;
749 vdev
->registered
= true;
750 vdev
->backend
= backend
;
752 spdk_vhost_set_coalescing(vdev
, SPDK_VHOST_COALESCING_DELAY_BASE_US
,
753 SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD
);
754 vdev
->next_stats_check_time
= 0;
755 vdev
->stats_check_interval
= SPDK_VHOST_DEV_STATS_CHECK_INTERVAL_MS
* spdk_get_ticks_hz() /
758 TAILQ_INSERT_TAIL(&g_spdk_vhost_devices
, vdev
, tailq
);
760 SPDK_INFOLOG(SPDK_LOG_VHOST
, "Controller %s: new controller added\n", vdev
->name
);
764 spdk_cpuset_free(cpumask
);
769 spdk_vhost_dev_unregister(struct spdk_vhost_dev
*vdev
)
771 if (vdev
->vid
!= -1) {
772 SPDK_ERRLOG("Controller %s has still valid connection.\n", vdev
->name
);
776 if (vdev
->registered
&& rte_vhost_driver_unregister(vdev
->path
) != 0) {
777 SPDK_ERRLOG("Could not unregister controller %s with vhost library\n"
778 "Check if domain socket %s still exists\n",
779 vdev
->name
, vdev
->path
);
783 SPDK_INFOLOG(SPDK_LOG_VHOST
, "Controller %s: removed\n", vdev
->name
);
787 spdk_cpuset_free(vdev
->cpumask
);
788 TAILQ_REMOVE(&g_spdk_vhost_devices
, vdev
, tailq
);
792 static struct spdk_vhost_dev
*
793 spdk_vhost_dev_next(unsigned i
)
795 struct spdk_vhost_dev
*vdev
;
797 TAILQ_FOREACH(vdev
, &g_spdk_vhost_devices
, tailq
) {
807 spdk_vhost_dev_get_name(struct spdk_vhost_dev
*vdev
)
809 assert(vdev
!= NULL
);
813 const struct spdk_cpuset
*
814 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev
*vdev
)
816 assert(vdev
!= NULL
);
817 return vdev
->cpumask
;
821 spdk_vhost_allocate_reactor(struct spdk_cpuset
*cpumask
)
823 uint32_t i
, selected_core
;
826 min_ctrlrs
= INT_MAX
;
827 selected_core
= spdk_env_get_first_core();
829 SPDK_ENV_FOREACH_CORE(i
) {
830 if (!spdk_cpuset_get_cpu(cpumask
, i
)) {
834 if (g_num_ctrlrs
[i
] < min_ctrlrs
) {
836 min_ctrlrs
= g_num_ctrlrs
[i
];
840 g_num_ctrlrs
[selected_core
]++;
841 return selected_core
;
845 spdk_vhost_dev_backend_event_done(void *event_ctx
, int response
)
847 struct spdk_vhost_dev_event_ctx
*ctx
= event_ctx
;
849 ctx
->response
= response
;
854 spdk_vhost_event_cb(void *arg1
, void *arg2
)
856 struct spdk_vhost_dev_event_ctx
*ctx
= arg1
;
858 ctx
->cb_fn(ctx
->vdev
, ctx
);
862 spdk_vhost_event_async_fn(void *arg1
, void *arg2
)
864 struct spdk_vhost_dev_event_ctx
*ctx
= arg1
;
865 struct spdk_vhost_dev
*vdev
;
866 struct spdk_event
*ev
;
868 if (pthread_mutex_trylock(&g_spdk_vhost_mutex
) != 0) {
869 ev
= spdk_event_allocate(spdk_env_get_current_core(), spdk_vhost_event_async_fn
, arg1
, arg2
);
874 vdev
= spdk_vhost_dev_find_by_id(ctx
->vdev_id
);
875 if (vdev
!= ctx
->vdev
) {
876 /* vdev has been changed after enqueuing this event */
880 if (vdev
!= NULL
&& vdev
->lcore
>= 0 &&
881 (uint32_t)vdev
->lcore
!= spdk_env_get_current_core()) {
882 /* if vdev has been relocated to other core, it is no longer thread-safe
883 * to access its contents here. Even though we're running under global vhost
884 * mutex, the controller itself (and its pollers) are not. We need to chase
885 * the vdev thread as many times as necessary.
887 ev
= spdk_event_allocate(vdev
->lcore
, spdk_vhost_event_async_fn
, arg1
, arg2
);
889 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
893 ctx
->cb_fn(vdev
, arg2
);
894 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
899 static void spdk_vhost_external_event_foreach_continue(struct spdk_vhost_dev
*vdev
,
900 spdk_vhost_event_fn fn
, void *arg
);
903 spdk_vhost_event_async_foreach_fn(void *arg1
, void *arg2
)
905 struct spdk_vhost_dev_event_ctx
*ctx
= arg1
;
906 struct spdk_vhost_dev
*vdev
;
907 struct spdk_event
*ev
;
909 if (pthread_mutex_trylock(&g_spdk_vhost_mutex
) != 0) {
910 ev
= spdk_event_allocate(spdk_env_get_current_core(),
911 spdk_vhost_event_async_foreach_fn
, arg1
, arg2
);
916 vdev
= spdk_vhost_dev_find_by_id(ctx
->vdev_id
);
917 if (vdev
!= ctx
->vdev
) {
918 /* ctx->vdev is probably a dangling pointer at this point.
919 * It must have been removed in the meantime, so we just skip
920 * it in our foreach chain. */
921 goto out_unlock_continue
;
924 /* the assert is just for static analyzers, vdev cannot be NULL here */
925 assert(vdev
!= NULL
);
926 if (vdev
->lcore
>= 0 &&
927 (uint32_t)vdev
->lcore
!= spdk_env_get_current_core()) {
928 /* if vdev has been relocated to other core, it is no longer thread-safe
929 * to access its contents here. Even though we're running under global vhost
930 * mutex, the controller itself (and its pollers) are not. We need to chase
931 * the vdev thread as many times as necessary.
933 ev
= spdk_event_allocate(vdev
->lcore
,
934 spdk_vhost_event_async_foreach_fn
, arg1
, arg2
);
936 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
940 ctx
->cb_fn(vdev
, arg2
);
943 vdev
= spdk_vhost_dev_next(ctx
->vdev_id
);
944 spdk_vhost_external_event_foreach_continue(vdev
, ctx
->cb_fn
, arg2
);
945 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
951 _spdk_vhost_event_send(struct spdk_vhost_dev
*vdev
, spdk_vhost_event_fn cb_fn
,
952 unsigned timeout_sec
, const char *errmsg
)
954 struct spdk_vhost_dev_event_ctx ev_ctx
= {0};
955 struct spdk_event
*ev
;
956 struct timespec timeout
;
959 rc
= sem_init(&ev_ctx
.sem
, 0, 0);
961 SPDK_ERRLOG("Failed to initialize semaphore for vhost timed event\n");
966 ev_ctx
.cb_fn
= cb_fn
;
967 ev
= spdk_event_allocate(vdev
->lcore
, spdk_vhost_event_cb
, &ev_ctx
, NULL
);
970 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
972 clock_gettime(CLOCK_REALTIME
, &timeout
);
973 timeout
.tv_sec
+= timeout_sec
;
975 rc
= sem_timedwait(&ev_ctx
.sem
, &timeout
);
977 SPDK_ERRLOG("Timeout waiting for event: %s.\n", errmsg
);
978 sem_wait(&ev_ctx
.sem
);
981 sem_destroy(&ev_ctx
.sem
);
982 pthread_mutex_lock(&g_spdk_vhost_mutex
);
983 return ev_ctx
.response
;
987 spdk_vhost_event_async_send(struct spdk_vhost_dev
*vdev
, spdk_vhost_event_fn cb_fn
, void *arg
,
990 struct spdk_vhost_dev_event_ctx
*ev_ctx
;
991 struct spdk_event
*ev
;
994 ev_ctx
= calloc(1, sizeof(*ev_ctx
));
995 if (ev_ctx
== NULL
) {
996 SPDK_ERRLOG("Failed to alloc vhost event.\n");
1001 ev_ctx
->vdev
= vdev
;
1002 ev_ctx
->vdev_id
= vdev
->id
;
1003 ev_ctx
->cb_fn
= cb_fn
;
1005 fn
= foreach
? spdk_vhost_event_async_foreach_fn
: spdk_vhost_event_async_fn
;
1006 ev
= spdk_event_allocate(ev_ctx
->vdev
->lcore
, fn
, ev_ctx
, arg
);
1008 spdk_event_call(ev
);
1014 stop_device(int vid
)
1016 struct spdk_vhost_dev
*vdev
;
1017 struct rte_vhost_vring
*q
;
1021 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1022 vdev
= spdk_vhost_dev_find_by_vid(vid
);
1024 SPDK_ERRLOG("Couldn't find device with vid %d to stop.\n", vid
);
1025 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1029 if (vdev
->lcore
== -1) {
1030 SPDK_ERRLOG("Controller %s is not loaded.\n", vdev
->name
);
1031 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1035 rc
= _spdk_vhost_event_send(vdev
, vdev
->backend
->stop_device
, 3, "stop device");
1037 SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vid
);
1038 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1042 for (i
= 0; i
< vdev
->max_queues
; i
++) {
1043 q
= &vdev
->virtqueue
[i
].vring
;
1044 if (q
->desc
== NULL
) {
1047 rte_vhost_set_vhost_vring_last_idx(vdev
->vid
, i
, q
->last_avail_idx
, q
->last_used_idx
);
1050 spdk_vhost_dev_mem_unregister(vdev
);
1052 spdk_vhost_free_reactor(vdev
->lcore
);
1054 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1058 start_device(int vid
)
1060 struct spdk_vhost_dev
*vdev
;
1064 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1066 vdev
= spdk_vhost_dev_find_by_vid(vid
);
1068 SPDK_ERRLOG("Controller with vid %d doesn't exist.\n", vid
);
1072 if (vdev
->lcore
!= -1) {
1073 SPDK_ERRLOG("Controller %s already loaded.\n", vdev
->name
);
1077 vdev
->max_queues
= 0;
1078 memset(vdev
->virtqueue
, 0, sizeof(vdev
->virtqueue
));
1079 for (i
= 0; i
< SPDK_VHOST_MAX_VQUEUES
; i
++) {
1080 if (rte_vhost_get_vhost_vring(vid
, i
, &vdev
->virtqueue
[i
].vring
)) {
1084 if (vdev
->virtqueue
[i
].vring
.desc
== NULL
||
1085 vdev
->virtqueue
[i
].vring
.size
== 0) {
1089 /* Disable notifications. */
1090 if (rte_vhost_enable_guest_notification(vid
, i
, 0) != 0) {
1091 SPDK_ERRLOG("vhost device %d: Failed to disable guest notification on queue %"PRIu16
"\n", vid
, i
);
1095 vdev
->max_queues
= i
+ 1;
1098 if (rte_vhost_get_negotiated_features(vid
, &vdev
->negotiated_features
) != 0) {
1099 SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid
);
1103 if (rte_vhost_get_mem_table(vid
, &vdev
->mem
) != 0) {
1104 SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid
);
1109 * Not sure right now but this look like some kind of QEMU bug and guest IO
1110 * might be frozed without kicking all queues after live-migration. This look like
1111 * the previous vhost instance failed to effectively deliver all interrupts before
1112 * the GET_VRING_BASE message. This shouldn't harm guest since spurious interrupts
1113 * should be ignored by guest virtio driver.
1115 * Tested on QEMU 2.10.91 and 2.11.50.
1117 for (i
= 0; i
< vdev
->max_queues
; i
++) {
1118 if (vdev
->virtqueue
[i
].vring
.callfd
!= -1) {
1119 eventfd_write(vdev
->virtqueue
[i
].vring
.callfd
, (eventfd_t
)1);
1123 vdev
->lcore
= spdk_vhost_allocate_reactor(vdev
->cpumask
);
1124 spdk_vhost_dev_mem_register(vdev
);
1125 rc
= _spdk_vhost_event_send(vdev
, vdev
->backend
->start_device
, 3, "start device");
1127 spdk_vhost_dev_mem_unregister(vdev
);
1129 spdk_vhost_free_reactor(vdev
->lcore
);
1134 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1139 get_config(int vid
, uint8_t *config
, uint32_t len
)
1141 struct spdk_vhost_dev
*vdev
;
1144 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1145 vdev
= spdk_vhost_dev_find_by_vid(vid
);
1147 SPDK_ERRLOG("Controller with vid %d doesn't exist.\n", vid
);
1151 if (vdev
->backend
->vhost_get_config
) {
1152 rc
= vdev
->backend
->vhost_get_config(vdev
, config
, len
);
1156 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1161 set_config(int vid
, uint8_t *config
, uint32_t offset
, uint32_t size
, uint32_t flags
)
1163 struct spdk_vhost_dev
*vdev
;
1166 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1167 vdev
= spdk_vhost_dev_find_by_vid(vid
);
1169 SPDK_ERRLOG("Controller with vid %d doesn't exist.\n", vid
);
1173 if (vdev
->backend
->vhost_set_config
) {
1174 rc
= vdev
->backend
->vhost_set_config(vdev
, config
, offset
, size
, flags
);
1178 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1183 spdk_vhost_set_socket_path(const char *basename
)
1187 if (basename
&& strlen(basename
) > 0) {
1188 ret
= snprintf(dev_dirname
, sizeof(dev_dirname
) - 2, "%s", basename
);
1192 if ((size_t)ret
>= sizeof(dev_dirname
) - 2) {
1193 SPDK_ERRLOG("Char dev dir path length %d is too long\n", ret
);
1197 if (dev_dirname
[ret
- 1] != '/') {
1198 dev_dirname
[ret
] = '/';
1199 dev_dirname
[ret
+ 1] = '\0';
1207 session_shutdown(void *arg
)
1209 struct spdk_vhost_dev
*vdev
= NULL
;
1211 TAILQ_FOREACH(vdev
, &g_spdk_vhost_devices
, tailq
) {
1212 rte_vhost_driver_unregister(vdev
->path
);
1213 vdev
->registered
= false;
1216 SPDK_INFOLOG(SPDK_LOG_VHOST
, "Exiting\n");
1217 spdk_event_call((struct spdk_event
*)arg
);
1222 spdk_vhost_dump_info_json(struct spdk_vhost_dev
*vdev
, struct spdk_json_write_ctx
*w
)
1224 assert(vdev
->backend
->dump_info_json
!= NULL
);
1225 vdev
->backend
->dump_info_json(vdev
, w
);
1229 spdk_vhost_dev_remove(struct spdk_vhost_dev
*vdev
)
1231 return vdev
->backend
->remove_device(vdev
);
1235 new_connection(int vid
)
1237 struct spdk_vhost_dev
*vdev
;
1238 char ifname
[PATH_MAX
];
1240 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1241 if (rte_vhost_get_ifname(vid
, ifname
, PATH_MAX
) < 0) {
1242 SPDK_ERRLOG("Couldn't get a valid ifname for device with vid %d\n", vid
);
1243 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1247 vdev
= spdk_vhost_dev_find(ifname
);
1249 SPDK_ERRLOG("Couldn't find device with vid %d to create connection for.\n", vid
);
1250 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1254 /* since pollers are not running it safe not to use spdk_event here */
1255 if (vdev
->vid
!= -1) {
1256 SPDK_ERRLOG("Device with vid %d is already connected.\n", vid
);
1257 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1262 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1267 destroy_connection(int vid
)
1269 struct spdk_vhost_dev
*vdev
;
1271 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1272 vdev
= spdk_vhost_dev_find_by_vid(vid
);
1274 SPDK_ERRLOG("Couldn't find device with vid %d to destroy connection for.\n", vid
);
1275 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1279 /* since pollers are not running it safe not to use spdk_event here */
1281 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1285 spdk_vhost_call_external_event(const char *ctrlr_name
, spdk_vhost_event_fn fn
, void *arg
)
1287 struct spdk_vhost_dev
*vdev
;
1289 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1290 vdev
= spdk_vhost_dev_find(ctrlr_name
);
1293 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1298 if (vdev
->lcore
== -1) {
1301 spdk_vhost_event_async_send(vdev
, fn
, arg
, false);
1304 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1308 spdk_vhost_external_event_foreach_continue(struct spdk_vhost_dev
*vdev
,
1309 spdk_vhost_event_fn fn
, void *arg
)
1316 while (vdev
->lcore
== -1) {
1318 vdev
= spdk_vhost_dev_next(vdev
->id
);
1325 spdk_vhost_event_async_send(vdev
, fn
, arg
, true);
1329 spdk_vhost_call_external_event_foreach(spdk_vhost_event_fn fn
, void *arg
)
1331 struct spdk_vhost_dev
*vdev
;
1333 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1334 vdev
= TAILQ_FIRST(&g_spdk_vhost_devices
);
1335 spdk_vhost_external_event_foreach_continue(vdev
, fn
, arg
);
1336 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1340 spdk_vhost_lock(void)
1342 pthread_mutex_lock(&g_spdk_vhost_mutex
);
1346 spdk_vhost_unlock(void)
1348 pthread_mutex_unlock(&g_spdk_vhost_mutex
);
1352 spdk_vhost_init(void)
1358 if (dev_dirname
[0] == '\0') {
1359 if (getcwd(dev_dirname
, sizeof(dev_dirname
) - 1) == NULL
) {
1360 SPDK_ERRLOG("getcwd failed (%d): %s\n", errno
, spdk_strerror(errno
));
1364 len
= strlen(dev_dirname
);
1365 if (dev_dirname
[len
- 1] != '/') {
1366 dev_dirname
[len
] = '/';
1367 dev_dirname
[len
+ 1] = '\0';
1371 last_core
= spdk_env_get_last_core();
1372 g_num_ctrlrs
= calloc(last_core
+ 1, sizeof(uint32_t));
1373 if (!g_num_ctrlrs
) {
1374 SPDK_ERRLOG("Could not allocate array size=%u for g_num_ctrlrs\n",
1379 ret
= spdk_vhost_scsi_controller_construct();
1381 SPDK_ERRLOG("Cannot construct vhost controllers\n");
1385 ret
= spdk_vhost_blk_controller_construct();
1387 SPDK_ERRLOG("Cannot construct vhost block controllers\n");
1391 ret
= spdk_vhost_nvme_controller_construct();
1393 SPDK_ERRLOG("Cannot construct vhost NVMe controllers\n");
1401 _spdk_vhost_fini_remove_vdev_cb(struct spdk_vhost_dev
*vdev
, void *arg
)
1403 spdk_vhost_fini_cb fini_cb
= arg
;
1406 spdk_vhost_dev_remove(vdev
);
1410 /* All devices are removed now. */
1417 _spdk_vhost_fini(void *arg1
, void *arg2
)
1419 spdk_vhost_fini_cb fini_cb
= arg1
;
1421 spdk_vhost_call_external_event_foreach(_spdk_vhost_fini_remove_vdev_cb
, fini_cb
);
1425 spdk_vhost_fini(spdk_vhost_fini_cb fini_cb
)
1429 struct spdk_event
*fini_ev
;
1431 fini_ev
= spdk_event_allocate(spdk_env_get_current_core(), _spdk_vhost_fini
, fini_cb
, NULL
);
1433 /* rte_vhost API for removing sockets is not asynchronous. Since it may call SPDK
1434 * ops for stopping a device or removing a connection, we need to call it from
1435 * a separate thread to avoid deadlock.
1437 rc
= pthread_create(&tid
, NULL
, &session_shutdown
, fini_ev
);
1439 SPDK_ERRLOG("Failed to start session shutdown thread (%d): %s\n", rc
, spdk_strerror(rc
));
1442 pthread_detach(tid
);
1445 struct spdk_vhost_write_config_json_ctx
{
1446 struct spdk_json_write_ctx
*w
;
1447 struct spdk_event
*done_ev
;
1451 spdk_vhost_config_json_cb(struct spdk_vhost_dev
*vdev
, void *arg
)
1453 struct spdk_vhost_write_config_json_ctx
*ctx
= arg
;
1454 uint32_t delay_base_us
;
1455 uint32_t iops_threshold
;
1458 spdk_json_write_array_end(ctx
->w
);
1459 spdk_event_call(ctx
->done_ev
);
1464 vdev
->backend
->write_config_json(vdev
, ctx
->w
);
1466 spdk_vhost_get_coalescing(vdev
, &delay_base_us
, &iops_threshold
);
1467 if (delay_base_us
) {
1468 spdk_json_write_object_begin(ctx
->w
);
1469 spdk_json_write_named_string(ctx
->w
, "method", "set_vhost_controller_coalescing");
1471 spdk_json_write_named_object_begin(ctx
->w
, "params");
1472 spdk_json_write_named_string(ctx
->w
, "ctrlr", vdev
->name
);
1473 spdk_json_write_named_uint32(ctx
->w
, "delay_base_us", delay_base_us
);
1474 spdk_json_write_named_uint32(ctx
->w
, "iops_threshold", iops_threshold
);
1475 spdk_json_write_object_end(ctx
->w
);
1477 spdk_json_write_object_end(ctx
->w
);
1484 spdk_vhost_config_json(struct spdk_json_write_ctx
*w
, struct spdk_event
*done_ev
)
1486 struct spdk_vhost_write_config_json_ctx
*ctx
;
1488 ctx
= calloc(1, sizeof(*ctx
));
1490 spdk_event_call(done_ev
);
1495 ctx
->done_ev
= done_ev
;
1497 spdk_json_write_array_begin(w
);
1499 spdk_vhost_call_external_event_foreach(spdk_vhost_config_json_cb
, ctx
);
1502 SPDK_LOG_REGISTER_COMPONENT("vhost", SPDK_LOG_VHOST
)
1503 SPDK_LOG_REGISTER_COMPONENT("vhost_ring", SPDK_LOG_VHOST_RING
)