]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include "spdk/stdinc.h" | |
35 | ||
36 | #include <sys/eventfd.h> | |
37 | ||
38 | #include <linux/virtio_scsi.h> | |
39 | ||
40 | #include <rte_config.h> | |
41 | #include <rte_malloc.h> | |
42 | #include <rte_alarm.h> | |
43 | ||
44 | #include "virtio_user/vhost.h" | |
45 | #include "spdk/string.h" | |
9f95a23c | 46 | #include "spdk/config.h" |
11fdf7f2 TL |
47 | |
48 | #include "spdk_internal/virtio.h" | |
49 | ||
/* Vhost-user protocol features this transport implements and is willing to
 * negotiate with the backend: multiqueue (F_MQ) and device-config-space
 * access (F_CONFIG). Anything else advertised by the host is masked off.
 */
#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES \
	((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
	(1ULL << VHOST_USER_PROTOCOL_F_CONFIG))
53 | ||
54 | static int | |
55 | virtio_user_create_queue(struct virtio_dev *vdev, uint32_t queue_sel) | |
56 | { | |
57 | struct virtio_user_dev *dev = vdev->ctx; | |
58 | ||
59 | /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come | |
60 | * firstly because vhost depends on this msg to allocate virtqueue | |
61 | * pair. | |
62 | */ | |
63 | struct vhost_vring_file file; | |
64 | ||
65 | file.index = queue_sel; | |
66 | file.fd = dev->callfds[queue_sel]; | |
67 | return dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file); | |
68 | } | |
69 | ||
70 | static int | |
9f95a23c | 71 | virtio_user_set_vring_addr(struct virtio_dev *vdev, uint32_t queue_sel) |
11fdf7f2 TL |
72 | { |
73 | struct virtio_user_dev *dev = vdev->ctx; | |
11fdf7f2 TL |
74 | struct vring *vring = &dev->vrings[queue_sel]; |
75 | struct vhost_vring_addr addr = { | |
76 | .index = queue_sel, | |
77 | .desc_user_addr = (uint64_t)(uintptr_t)vring->desc, | |
78 | .avail_user_addr = (uint64_t)(uintptr_t)vring->avail, | |
79 | .used_user_addr = (uint64_t)(uintptr_t)vring->used, | |
80 | .log_guest_addr = 0, | |
81 | .flags = 0, /* disable log */ | |
82 | }; | |
9f95a23c TL |
83 | |
84 | return dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr); | |
85 | } | |
86 | ||
87 | static int | |
88 | virtio_user_kick_queue(struct virtio_dev *vdev, uint32_t queue_sel) | |
89 | { | |
90 | struct virtio_user_dev *dev = vdev->ctx; | |
91 | struct vhost_vring_file file; | |
92 | struct vhost_vring_state state; | |
93 | struct vring *vring = &dev->vrings[queue_sel]; | |
11fdf7f2 TL |
94 | int rc; |
95 | ||
96 | state.index = queue_sel; | |
97 | state.num = vring->num; | |
98 | rc = dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state); | |
99 | if (rc < 0) { | |
100 | return rc; | |
101 | } | |
102 | ||
103 | state.index = queue_sel; | |
104 | state.num = 0; /* no reservation */ | |
105 | rc = dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state); | |
106 | if (rc < 0) { | |
107 | return rc; | |
108 | } | |
109 | ||
9f95a23c | 110 | virtio_user_set_vring_addr(vdev, queue_sel); |
11fdf7f2 TL |
111 | |
112 | /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes | |
113 | * lastly because vhost depends on this msg to judge if | |
114 | * virtio is ready. | |
115 | */ | |
116 | file.index = queue_sel; | |
117 | file.fd = dev->kickfds[queue_sel]; | |
118 | return dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file); | |
119 | } | |
120 | ||
121 | static int | |
122 | virtio_user_stop_queue(struct virtio_dev *vdev, uint32_t queue_sel) | |
123 | { | |
124 | struct virtio_user_dev *dev = vdev->ctx; | |
125 | struct vhost_vring_state state; | |
126 | ||
127 | state.index = queue_sel; | |
128 | state.num = 0; | |
129 | ||
130 | return dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE, &state); | |
131 | } | |
132 | ||
133 | static int | |
134 | virtio_user_queue_setup(struct virtio_dev *vdev, | |
135 | int (*fn)(struct virtio_dev *, uint32_t)) | |
136 | { | |
137 | uint32_t i; | |
138 | int rc; | |
139 | ||
140 | for (i = 0; i < vdev->max_queues; ++i) { | |
141 | rc = fn(vdev, i); | |
142 | if (rc < 0) { | |
143 | SPDK_ERRLOG("setup tx vq fails: %"PRIu32".\n", i); | |
144 | return rc; | |
145 | } | |
146 | } | |
147 | ||
148 | return 0; | |
149 | } | |
150 | ||
/* spdk_mem_map notify callback: keep the vhost-user backend's view of our
 * memory in sync whenever a region is (un)registered.
 *
 * Note: map, action, vaddr, and size are intentionally unused - the whole
 * memory table is resent on every notification rather than tracking
 * individual pages.
 */
static int
virtio_user_map_notify(void *cb_ctx, struct spdk_mem_map *map,
		       enum spdk_mem_map_notify_action action,
		       void *vaddr, size_t size)
{
	struct virtio_dev *vdev = cb_ctx;
	struct virtio_user_dev *dev = vdev->ctx;
	uint64_t features;
	int ret;

	/* We have to resend all mappings anyway, so don't bother with any
	 * page tracking.
	 */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0) {
		return ret;
	}

#ifdef SPDK_CONFIG_VHOST_INTERNAL_LIB
	/* Our internal rte_vhost lib requires SET_VRING_ADDR to flush a pending
	 * SET_MEM_TABLE. On the other hand, the upstream rte_vhost will invalidate
	 * the entire queue upon receiving SET_VRING_ADDR message, so we mustn't
	 * send it here. Both behaviors are strictly implementation specific, but
	 * this message isn't needed from the point of the spec, so send it only
	 * if vhost is compiled with our internal lib.
	 */
	ret = virtio_user_queue_setup(vdev, virtio_user_set_vring_addr);
	if (ret < 0) {
		return ret;
	}
#endif

	/* Since we might want to use that mapping straight away, we have to
	 * make sure the guest has already processed our SET_MEM_TABLE message.
	 * F_REPLY_ACK is just a feature and the host is not obliged to
	 * support it, so we send a simple message that always has a response
	 * and we wait for that response. Messages are always processed in order.
	 */
	return dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, &features);
}
191 | ||
192 | static int | |
193 | virtio_user_register_mem(struct virtio_dev *vdev) | |
194 | { | |
195 | struct virtio_user_dev *dev = vdev->ctx; | |
196 | const struct spdk_mem_map_ops virtio_user_map_ops = { | |
197 | .notify_cb = virtio_user_map_notify, | |
198 | .are_contiguous = NULL | |
199 | }; | |
200 | ||
201 | dev->mem_map = spdk_mem_map_alloc(0, &virtio_user_map_ops, vdev); | |
202 | if (dev->mem_map == NULL) { | |
203 | SPDK_ERRLOG("spdk_mem_map_alloc() failed\n"); | |
204 | return -1; | |
205 | } | |
206 | ||
207 | return 0; | |
208 | } | |
209 | ||
210 | static void | |
211 | virtio_user_unregister_mem(struct virtio_dev *vdev) | |
212 | { | |
213 | struct virtio_user_dev *dev = vdev->ctx; | |
214 | ||
215 | spdk_mem_map_free(&dev->mem_map); | |
216 | } | |
217 | ||
218 | static int | |
219 | virtio_user_start_device(struct virtio_dev *vdev) | |
220 | { | |
221 | struct virtio_user_dev *dev = vdev->ctx; | |
222 | uint64_t host_max_queues; | |
223 | int ret; | |
224 | ||
225 | if ((dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) == 0 && | |
226 | vdev->max_queues > 1 + vdev->fixed_queues_num) { | |
227 | SPDK_WARNLOG("%s: requested %"PRIu16" request queues, but the " | |
228 | "host doesn't support VHOST_USER_PROTOCOL_F_MQ. " | |
229 | "Only one request queue will be used.\n", | |
230 | vdev->name, vdev->max_queues - vdev->fixed_queues_num); | |
231 | vdev->max_queues = 1 + vdev->fixed_queues_num; | |
232 | } | |
233 | ||
234 | /* negotiate the number of I/O queues. */ | |
235 | ret = dev->ops->send_request(dev, VHOST_USER_GET_QUEUE_NUM, &host_max_queues); | |
236 | if (ret < 0) { | |
237 | return ret; | |
238 | } | |
239 | ||
240 | if (vdev->max_queues > host_max_queues + vdev->fixed_queues_num) { | |
241 | SPDK_WARNLOG("%s: requested %"PRIu16" request queues" | |
242 | "but only %"PRIu64" available\n", | |
243 | vdev->name, vdev->max_queues - vdev->fixed_queues_num, | |
244 | host_max_queues); | |
245 | vdev->max_queues = host_max_queues; | |
246 | } | |
247 | ||
248 | /* tell vhost to create queues */ | |
249 | ret = virtio_user_queue_setup(vdev, virtio_user_create_queue); | |
250 | if (ret < 0) { | |
251 | return ret; | |
252 | } | |
253 | ||
254 | ret = virtio_user_register_mem(vdev); | |
255 | if (ret < 0) { | |
256 | return ret; | |
257 | } | |
258 | ||
9f95a23c | 259 | return virtio_user_queue_setup(vdev, virtio_user_kick_queue); |
11fdf7f2 TL |
260 | } |
261 | ||
262 | static int | |
263 | virtio_user_stop_device(struct virtio_dev *vdev) | |
264 | { | |
265 | int ret; | |
266 | ||
267 | ret = virtio_user_queue_setup(vdev, virtio_user_stop_queue); | |
268 | /* a queue might fail to stop for various reasons, e.g. socket | |
269 | * connection going down, but this mustn't prevent us from freeing | |
270 | * the mem map. | |
271 | */ | |
272 | virtio_user_unregister_mem(vdev); | |
273 | return ret; | |
274 | } | |
275 | ||
276 | static int | |
277 | virtio_user_dev_setup(struct virtio_dev *vdev) | |
278 | { | |
279 | struct virtio_user_dev *dev = vdev->ctx; | |
280 | uint16_t i; | |
281 | ||
282 | dev->vhostfd = -1; | |
283 | ||
284 | for (i = 0; i < SPDK_VIRTIO_MAX_VIRTQUEUES; ++i) { | |
285 | dev->callfds[i] = -1; | |
286 | dev->kickfds[i] = -1; | |
287 | } | |
288 | ||
289 | dev->ops = &ops_user; | |
290 | ||
291 | return dev->ops->setup(dev); | |
292 | } | |
293 | ||
294 | static int | |
295 | virtio_user_read_dev_config(struct virtio_dev *vdev, size_t offset, | |
296 | void *dst, int length) | |
297 | { | |
298 | struct virtio_user_dev *dev = vdev->ctx; | |
299 | struct vhost_user_config cfg = {0}; | |
300 | int rc; | |
301 | ||
302 | if ((dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_CONFIG)) == 0) { | |
303 | return -ENOTSUP; | |
304 | } | |
305 | ||
306 | cfg.offset = 0; | |
307 | cfg.size = VHOST_USER_MAX_CONFIG_SIZE; | |
308 | ||
309 | rc = dev->ops->send_request(dev, VHOST_USER_GET_CONFIG, &cfg); | |
310 | if (rc < 0) { | |
311 | SPDK_ERRLOG("get_config failed: %s\n", spdk_strerror(-rc)); | |
312 | return rc; | |
313 | } | |
314 | ||
315 | memcpy(dst, cfg.region + offset, length); | |
316 | return 0; | |
317 | } | |
318 | ||
319 | static int | |
320 | virtio_user_write_dev_config(struct virtio_dev *vdev, size_t offset, | |
321 | const void *src, int length) | |
322 | { | |
323 | struct virtio_user_dev *dev = vdev->ctx; | |
324 | struct vhost_user_config cfg = {0}; | |
325 | int rc; | |
326 | ||
327 | if ((dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_CONFIG)) == 0) { | |
328 | return -ENOTSUP; | |
329 | } | |
330 | ||
331 | cfg.offset = offset; | |
332 | cfg.size = length; | |
333 | memcpy(cfg.region, src, length); | |
334 | ||
335 | rc = dev->ops->send_request(dev, VHOST_USER_SET_CONFIG, &cfg); | |
336 | if (rc < 0) { | |
337 | SPDK_ERRLOG("set_config failed: %s\n", spdk_strerror(-rc)); | |
338 | return rc; | |
339 | } | |
340 | ||
341 | return 0; | |
342 | } | |
343 | ||
/* Update the cached device status byte and trigger the matching state
 * transition: DRIVER_OK starts the device, RESET (while running) stops it.
 * Once NEEDS_RESET is latched, only a RESET is accepted; any failed
 * transition latches NEEDS_RESET instead of storing the new status.
 */
static void
virtio_user_set_status(struct virtio_dev *vdev, uint8_t status)
{
	struct virtio_user_dev *dev = vdev->ctx;
	int rc = 0;

	/* Device needs a reset: refuse every status write except RESET. */
	if ((dev->status & VIRTIO_CONFIG_S_NEEDS_RESET) &&
	    status != VIRTIO_CONFIG_S_RESET) {
		rc = -1;
	} else if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Driver is ready: start processing queues. */
		rc = virtio_user_start_device(vdev);
	} else if (status == VIRTIO_CONFIG_S_RESET &&
		   (dev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Reset while running: stop the device first. */
		rc = virtio_user_stop_device(vdev);
	}

	if (rc != 0) {
		/* Transition failed - flag the device as needing a reset. */
		dev->status |= VIRTIO_CONFIG_S_NEEDS_RESET;
	} else {
		dev->status = status;
	}
}
366 | ||
367 | static uint8_t | |
368 | virtio_user_get_status(struct virtio_dev *vdev) | |
369 | { | |
370 | struct virtio_user_dev *dev = vdev->ctx; | |
371 | ||
372 | return dev->status; | |
373 | } | |
374 | ||
375 | static uint64_t | |
376 | virtio_user_get_features(struct virtio_dev *vdev) | |
377 | { | |
378 | struct virtio_user_dev *dev = vdev->ctx; | |
379 | uint64_t features; | |
380 | int rc; | |
381 | ||
382 | rc = dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, &features); | |
383 | if (rc < 0) { | |
384 | SPDK_ERRLOG("get_features failed: %s\n", spdk_strerror(-rc)); | |
385 | return 0; | |
386 | } | |
387 | ||
388 | return features; | |
389 | } | |
390 | ||
391 | static int | |
392 | virtio_user_set_features(struct virtio_dev *vdev, uint64_t features) | |
393 | { | |
394 | struct virtio_user_dev *dev = vdev->ctx; | |
395 | uint64_t protocol_features; | |
396 | int ret; | |
397 | ||
398 | ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features); | |
399 | if (ret < 0) { | |
400 | return ret; | |
401 | } | |
402 | ||
403 | vdev->negotiated_features = features; | |
404 | vdev->modern = virtio_dev_has_feature(vdev, VIRTIO_F_VERSION_1); | |
405 | ||
406 | if (!virtio_dev_has_feature(vdev, VHOST_USER_F_PROTOCOL_FEATURES)) { | |
407 | /* nothing else to do */ | |
408 | return 0; | |
409 | } | |
410 | ||
411 | ret = dev->ops->send_request(dev, VHOST_USER_GET_PROTOCOL_FEATURES, &protocol_features); | |
412 | if (ret < 0) { | |
413 | return ret; | |
414 | } | |
415 | ||
416 | protocol_features &= VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES; | |
417 | ret = dev->ops->send_request(dev, VHOST_USER_SET_PROTOCOL_FEATURES, &protocol_features); | |
418 | if (ret < 0) { | |
419 | return ret; | |
420 | } | |
421 | ||
422 | dev->protocol_features = protocol_features; | |
423 | return 0; | |
424 | } | |
425 | ||
426 | static uint16_t | |
427 | virtio_user_get_queue_size(struct virtio_dev *vdev, uint16_t queue_id) | |
428 | { | |
429 | struct virtio_user_dev *dev = vdev->ctx; | |
430 | ||
431 | /* Currently each queue has same queue size */ | |
432 | return dev->queue_size; | |
433 | } | |
434 | ||
435 | static int | |
436 | virtio_user_setup_queue(struct virtio_dev *vdev, struct virtqueue *vq) | |
437 | { | |
438 | struct virtio_user_dev *dev = vdev->ctx; | |
439 | struct vhost_vring_state state; | |
440 | uint16_t queue_idx = vq->vq_queue_index; | |
441 | void *queue_mem; | |
442 | uint64_t desc_addr, avail_addr, used_addr; | |
443 | int callfd, kickfd, rc; | |
444 | ||
445 | if (dev->callfds[queue_idx] != -1 || dev->kickfds[queue_idx] != -1) { | |
446 | SPDK_ERRLOG("queue %"PRIu16" already exists\n", queue_idx); | |
447 | return -EEXIST; | |
448 | } | |
449 | ||
450 | /* May use invalid flag, but some backend uses kickfd and | |
451 | * callfd as criteria to judge if dev is alive. so finally we | |
452 | * use real event_fd. | |
453 | */ | |
454 | callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); | |
455 | if (callfd < 0) { | |
456 | SPDK_ERRLOG("callfd error, %s\n", spdk_strerror(errno)); | |
457 | return -errno; | |
458 | } | |
459 | ||
460 | kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); | |
461 | if (kickfd < 0) { | |
462 | SPDK_ERRLOG("kickfd error, %s\n", spdk_strerror(errno)); | |
463 | close(callfd); | |
464 | return -errno; | |
465 | } | |
466 | ||
9f95a23c TL |
467 | queue_mem = spdk_zmalloc(vq->vq_ring_size, VIRTIO_PCI_VRING_ALIGN, NULL, |
468 | SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); | |
11fdf7f2 TL |
469 | if (queue_mem == NULL) { |
470 | close(kickfd); | |
471 | close(callfd); | |
472 | return -ENOMEM; | |
473 | } | |
474 | ||
475 | vq->vq_ring_mem = SPDK_VTOPHYS_ERROR; | |
476 | vq->vq_ring_virt_mem = queue_mem; | |
477 | ||
478 | state.index = vq->vq_queue_index; | |
479 | state.num = 0; | |
480 | ||
481 | if (virtio_dev_has_feature(vdev, VHOST_USER_F_PROTOCOL_FEATURES)) { | |
482 | rc = dev->ops->send_request(dev, VHOST_USER_SET_VRING_ENABLE, &state); | |
483 | if (rc < 0) { | |
484 | SPDK_ERRLOG("failed to send VHOST_USER_SET_VRING_ENABLE: %s\n", | |
485 | spdk_strerror(-rc)); | |
9f95a23c | 486 | spdk_free(queue_mem); |
11fdf7f2 TL |
487 | return -rc; |
488 | } | |
489 | } | |
490 | ||
491 | dev->callfds[queue_idx] = callfd; | |
492 | dev->kickfds[queue_idx] = kickfd; | |
493 | ||
494 | desc_addr = (uintptr_t)vq->vq_ring_virt_mem; | |
495 | avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); | |
496 | used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, | |
497 | ring[vq->vq_nentries]), | |
498 | VIRTIO_PCI_VRING_ALIGN); | |
499 | ||
500 | dev->vrings[queue_idx].num = vq->vq_nentries; | |
501 | dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr; | |
502 | dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr; | |
503 | dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr; | |
504 | ||
505 | return 0; | |
506 | } | |
507 | ||
508 | static void | |
509 | virtio_user_del_queue(struct virtio_dev *vdev, struct virtqueue *vq) | |
510 | { | |
511 | /* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU | |
512 | * correspondingly stops the ioeventfds, and reset the status of | |
513 | * the device. | |
514 | * For modern devices, set queue desc, avail, used in PCI bar to 0, | |
515 | * not see any more behavior in QEMU. | |
516 | * | |
517 | * Here we just care about what information to deliver to vhost-user. | |
518 | * So we just close ioeventfd for now. | |
519 | */ | |
520 | struct virtio_user_dev *dev = vdev->ctx; | |
521 | ||
522 | close(dev->callfds[vq->vq_queue_index]); | |
523 | close(dev->kickfds[vq->vq_queue_index]); | |
524 | dev->callfds[vq->vq_queue_index] = -1; | |
525 | dev->kickfds[vq->vq_queue_index] = -1; | |
526 | ||
9f95a23c | 527 | spdk_free(vq->vq_ring_virt_mem); |
11fdf7f2 TL |
528 | } |
529 | ||
530 | static void | |
531 | virtio_user_notify_queue(struct virtio_dev *vdev, struct virtqueue *vq) | |
532 | { | |
533 | uint64_t buf = 1; | |
534 | struct virtio_user_dev *dev = vdev->ctx; | |
535 | ||
536 | if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0) { | |
537 | SPDK_ERRLOG("failed to kick backend: %s.\n", spdk_strerror(errno)); | |
538 | } | |
539 | } | |
540 | ||
541 | static void | |
542 | virtio_user_destroy(struct virtio_dev *vdev) | |
543 | { | |
544 | struct virtio_user_dev *dev = vdev->ctx; | |
545 | ||
546 | close(dev->vhostfd); | |
547 | free(dev); | |
548 | } | |
549 | ||
550 | static void | |
551 | virtio_user_dump_json_info(struct virtio_dev *vdev, struct spdk_json_write_ctx *w) | |
552 | { | |
553 | struct virtio_user_dev *dev = vdev->ctx; | |
554 | ||
9f95a23c TL |
555 | spdk_json_write_named_string(w, "type", "user"); |
556 | spdk_json_write_named_string(w, "socket", dev->path); | |
11fdf7f2 TL |
557 | } |
558 | ||
559 | static void | |
560 | virtio_user_write_json_config(struct virtio_dev *vdev, struct spdk_json_write_ctx *w) | |
561 | { | |
562 | struct virtio_user_dev *dev = vdev->ctx; | |
563 | ||
564 | spdk_json_write_named_string(w, "trtype", "user"); | |
565 | spdk_json_write_named_string(w, "traddr", dev->path); | |
566 | spdk_json_write_named_uint32(w, "vq_count", vdev->max_queues - vdev->fixed_queues_num); | |
567 | spdk_json_write_named_uint32(w, "vq_size", virtio_dev_backend_ops(vdev)->get_queue_size(vdev, 0)); | |
568 | } | |
569 | ||
/* Backend vtable wiring the generic virtio transport layer to this
 * vhost-user (virtio-user) implementation. Registered with each device via
 * virtio_dev_construct() in virtio_user_dev_init().
 */
static const struct virtio_dev_ops virtio_user_ops = {
	.read_dev_cfg = virtio_user_read_dev_config,
	.write_dev_cfg = virtio_user_write_dev_config,
	.get_status = virtio_user_get_status,
	.set_status = virtio_user_set_status,
	.get_features = virtio_user_get_features,
	.set_features = virtio_user_set_features,
	.destruct_dev = virtio_user_destroy,
	.get_queue_size = virtio_user_get_queue_size,
	.setup_queue = virtio_user_setup_queue,
	.del_queue = virtio_user_del_queue,
	.notify_queue = virtio_user_notify_queue,
	.dump_json_info = virtio_user_dump_json_info,
	.write_json_config = virtio_user_write_json_config,
};
585 | ||
586 | int | |
587 | virtio_user_dev_init(struct virtio_dev *vdev, const char *name, const char *path, | |
588 | uint32_t queue_size) | |
589 | { | |
590 | struct virtio_user_dev *dev; | |
591 | int rc; | |
592 | ||
593 | if (name == NULL) { | |
594 | SPDK_ERRLOG("No name gived for controller: %s\n", path); | |
595 | return -EINVAL; | |
596 | } | |
597 | ||
598 | dev = calloc(1, sizeof(*dev)); | |
599 | if (dev == NULL) { | |
600 | return -ENOMEM; | |
601 | } | |
602 | ||
603 | rc = virtio_dev_construct(vdev, name, &virtio_user_ops, dev); | |
604 | if (rc != 0) { | |
605 | SPDK_ERRLOG("Failed to init device: %s\n", path); | |
606 | free(dev); | |
607 | return rc; | |
608 | } | |
609 | ||
610 | vdev->is_hw = 0; | |
611 | ||
612 | snprintf(dev->path, PATH_MAX, "%s", path); | |
613 | dev->queue_size = queue_size; | |
614 | ||
615 | rc = virtio_user_dev_setup(vdev); | |
616 | if (rc < 0) { | |
617 | SPDK_ERRLOG("backend set up fails\n"); | |
618 | goto err; | |
619 | } | |
620 | ||
621 | rc = dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL); | |
622 | if (rc < 0) { | |
623 | SPDK_ERRLOG("set_owner fails: %s\n", spdk_strerror(-rc)); | |
624 | goto err; | |
625 | } | |
626 | ||
627 | return 0; | |
628 | ||
629 | err: | |
630 | virtio_dev_destruct(vdev); | |
631 | return rc; | |
632 | } |