/*
 * Dedicated thread for virtio-blk I/O processing
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "trace.h"
#include "qemu/iov.h"
#include "qemu/thread.h"
#include "vring.h"
#include "ioq.h"
#include "migration/migration.h"
#include "hw/virtio-blk.h"
#include "hw/dataplane/virtio-blk.h"
#include "block/aio.h"

enum {
    SEG_MAX = 126,                  /* maximum number of I/O segments */
    VRING_MAX = SEG_MAX + 2,        /* maximum number of vring descriptors */
    REQ_MAX = VRING_MAX,            /* maximum number of requests in the vring,
                                     * is VRING_MAX / 2 with traditional and
                                     * VRING_MAX with indirect descriptors */
};

typedef struct {
    struct iocb iocb;               /* Linux AIO control block */
    QEMUIOVector *inhdr;            /* iovecs for virtio_blk_inhdr */
    unsigned int head;              /* vring descriptor index */
    struct iovec *bounce_iov;       /* used if guest buffers are unaligned */
    QEMUIOVector *read_qiov;        /* for read completion w/ bounce buffer */
} VirtIOBlockRequest;

struct VirtIOBlockDataPlane {
    bool started;
    bool stopping;
    QEMUBH *start_bh;
    QemuThread thread;

    VirtIOBlkConf *blk;
    int fd;                         /* image file descriptor */

    VirtIODevice *vdev;
    Vring vring;                    /* virtqueue vring */
    EventNotifier *guest_notifier;  /* irq */

    /* Note that these EventNotifiers are assigned by value.  This is
     * fine as long as you do not call event_notifier_cleanup on them
     * (because you don't own the file descriptor or handle; you just
     * use it).
     */
    AioContext *ctx;
    EventNotifier io_notifier;      /* Linux AIO completion */
    EventNotifier host_notifier;    /* doorbell */

    IOQueue ioqueue;                /* Linux AIO queue (should really be per
                                       dataplane thread) */
    VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
                                             queue */

    unsigned int num_reqs;

    Error *migration_blocker;
};

/* Raise an interrupt to signal guest, if necessary */
static void notify_guest(VirtIOBlockDataPlane *s)
{
    if (!vring_should_notify(s->vdev, &s->vring)) {
        return;
    }

    event_notifier_set(s->guest_notifier);
}

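/* Complete one Linux AIO request: copy bounce-buffer reads back into guest
 * memory, fill in the virtio_blk_inhdr status byte, and push the descriptor
 * back onto the vring.
 */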
static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
    struct virtio_blk_inhdr hdr;
    int len;

    if (likely(ret >= 0)) {
        hdr.status = VIRTIO_BLK_S_OK;
        len = ret;
    } else {
        hdr.status = VIRTIO_BLK_S_IOERR;
        len = 0;
    }

    trace_virtio_blk_data_plane_complete_request(s, req->head, ret);

    if (req->read_qiov) {
        assert(req->bounce_iov);
        qemu_iovec_from_buf(req->read_qiov, 0, req->bounce_iov->iov_base, len);
        qemu_iovec_destroy(req->read_qiov);
        g_slice_free(QEMUIOVector, req->read_qiov);
    }

    if (req->bounce_iov) {
        qemu_vfree(req->bounce_iov->iov_base);
        g_slice_free(struct iovec, req->bounce_iov);
    }

    qemu_iovec_from_buf(req->inhdr, 0, &hdr, sizeof(hdr));
    qemu_iovec_destroy(req->inhdr);
    g_slice_free(QEMUIOVector, req->inhdr);

    /* According to the virtio specification len should be the number of bytes
     * written to, but for virtio-blk it seems to be the number of bytes
     * transferred plus the status bytes.
     */
    vring_push(&s->vring, req->head, len + sizeof(hdr));

    s->num_reqs--;
}

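/* Complete a request without performing disk I/O, e.g. on error or for
 * commands emulated inline such as VIRTIO_BLK_T_GET_ID.
 */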
static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
                                   QEMUIOVector *inhdr, unsigned char status)
{
    struct virtio_blk_inhdr hdr = {
        .status = status,
    };

    qemu_iovec_from_buf(inhdr, 0, &hdr, sizeof(hdr));
    qemu_iovec_destroy(inhdr);
    g_slice_free(QEMUIOVector, inhdr);

    vring_push(&s->vring, head, sizeof(hdr));
    notify_guest(s);
}

/* Get disk serial number */
static void do_get_id_cmd(VirtIOBlockDataPlane *s,
                          struct iovec *iov, unsigned int iov_cnt,
                          unsigned int head, QEMUIOVector *inhdr)
{
    char id[VIRTIO_BLK_ID_BYTES];

    /* Serial number is not NUL-terminated when it fills the whole buffer */
    strncpy(id, s->blk->serial ? s->blk->serial : "", sizeof(id));
    iov_from_buf(iov, iov_cnt, 0, id, sizeof(id));
    complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
}

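/* Queue a read or write request for Linux AIO submission.  Guest buffers
 * that do not meet the alignment requirements of the image file (opened with
 * O_DIRECT) are redirected through an aligned bounce buffer.
 */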
static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
                       struct iovec *iov, unsigned int iov_cnt,
                       long long offset, unsigned int head,
                       QEMUIOVector *inhdr)
{
    struct iocb *iocb;
    QEMUIOVector qiov;
    struct iovec *bounce_iov = NULL;
    QEMUIOVector *read_qiov = NULL;

    qemu_iovec_init_external(&qiov, iov, iov_cnt);
    if (!bdrv_qiov_is_aligned(s->blk->conf.bs, &qiov)) {
        void *bounce_buffer = qemu_blockalign(s->blk->conf.bs, qiov.size);

        if (read) {
            /* Need to copy back from bounce buffer on completion */
            read_qiov = g_slice_new(QEMUIOVector);
            qemu_iovec_init(read_qiov, iov_cnt);
            qemu_iovec_concat_iov(read_qiov, iov, iov_cnt, 0, qiov.size);
        } else {
            qemu_iovec_to_buf(&qiov, 0, bounce_buffer, qiov.size);
        }

        /* Redirect I/O to aligned bounce buffer */
        bounce_iov = g_slice_new(struct iovec);
        bounce_iov->iov_base = bounce_buffer;
        bounce_iov->iov_len = qiov.size;
        iov = bounce_iov;
        iov_cnt = 1;
    }

    iocb = ioq_rdwr(&s->ioqueue, read, iov, iov_cnt, offset);

    /* Fill in virtio block metadata needed for completion */
    VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
    req->head = head;
    req->inhdr = inhdr;
    req->bounce_iov = bounce_iov;
    req->read_qiov = read_qiov;
    return 0;
}

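/* Parse one virtio-blk request from the vring iovecs: copy out the outhdr,
 * carve the inhdr off the tail of the device-writable iovecs, and dispatch
 * on the request type.
 */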
static int process_request(IOQueue *ioq, struct iovec iov[],
                           unsigned int out_num, unsigned int in_num,
                           unsigned int head)
{
    VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
    struct iovec *in_iov = &iov[out_num];
    struct virtio_blk_outhdr outhdr;
    QEMUIOVector *inhdr;
    size_t in_size;

    /* Copy in outhdr */
    if (unlikely(iov_to_buf(iov, out_num, 0, &outhdr,
                            sizeof(outhdr)) != sizeof(outhdr))) {
        error_report("virtio-blk request outhdr too short");
        return -EFAULT;
    }
    iov_discard_front(&iov, &out_num, sizeof(outhdr));

    /* Grab inhdr for later */
    in_size = iov_size(in_iov, in_num);
    if (in_size < sizeof(struct virtio_blk_inhdr)) {
        error_report("virtio-blk request inhdr too short");
        return -EFAULT;
    }
    inhdr = g_slice_new(QEMUIOVector);
    qemu_iovec_init(inhdr, 1);
    qemu_iovec_concat_iov(inhdr, in_iov, in_num,
                          in_size - sizeof(struct virtio_blk_inhdr),
                          sizeof(struct virtio_blk_inhdr));
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    /* TODO Linux sets the barrier bit even when not advertised! */
    outhdr.type &= ~VIRTIO_BLK_T_BARRIER;

    switch (outhdr.type) {
    case VIRTIO_BLK_T_IN:
        do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr);
        return 0;

    case VIRTIO_BLK_T_OUT:
        do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr);
        return 0;

    case VIRTIO_BLK_T_SCSI_CMD:
        /* TODO support SCSI commands */
        complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
        return 0;

    case VIRTIO_BLK_T_FLUSH:
        /* TODO fdsync not supported by Linux AIO, do it synchronously here! */
        if (qemu_fdatasync(s->fd) < 0) {
            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_IOERR);
        } else {
            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
        }
        return 0;

    case VIRTIO_BLK_T_GET_ID:
        do_get_id_cmd(s, in_iov, in_num, head, inhdr);
        return 0;

    default:
        error_report("virtio-blk unsupported request type %#x", outhdr.type);
        qemu_iovec_destroy(inhdr);
        g_slice_free(QEMUIOVector, inhdr);
        return -EFAULT;
    }
}

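/* Called when the guest kicks the virtqueue: pop all available requests from
 * the vring and submit them to Linux AIO in a single batch.
 */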
static void handle_notify(EventNotifier *e)
{
    VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
                                           host_notifier);

    /* There is one array of iovecs into which all new requests are extracted
     * from the vring.  Requests are read from the vring and the translated
     * descriptors are written to the iovecs array.  The iovecs do not have to
     * persist across handle_notify() calls because the kernel copies the
     * iovecs on io_submit().
     *
     * Handling io_submit() EAGAIN may require storing the requests across
     * handle_notify() calls until the kernel has sufficient resources to
     * accept more I/O.  This is not implemented yet.
     */
    struct iovec iovec[VRING_MAX];
    struct iovec *end = &iovec[VRING_MAX];
    struct iovec *iov = iovec;

    /* When a request is read from the vring, the index of the first descriptor
     * (aka head) is returned so that the completed request can be pushed onto
     * the vring later.
     *
     * The number of hypervisor read-only iovecs is out_num.  The number of
     * hypervisor write-only iovecs is in_num.
     */
    int head;
    unsigned int out_num = 0, in_num = 0;
    unsigned int num_queued;

    event_notifier_test_and_clear(&s->host_notifier);
    for (;;) {
        /* Disable guest->host notifies to avoid unnecessary vmexits */
        vring_disable_notification(s->vdev, &s->vring);

        for (;;) {
            head = vring_pop(s->vdev, &s->vring, iov, end, &out_num, &in_num);
            if (head < 0) {
                break; /* no more requests */
            }

            trace_virtio_blk_data_plane_process_request(s, out_num, in_num,
                                                        head);

            if (process_request(&s->ioqueue, iov, out_num, in_num, head) < 0) {
                vring_set_broken(&s->vring);
                break;
            }
            iov += out_num + in_num;
        }

        if (likely(head == -EAGAIN)) { /* vring emptied */
            /* Re-enable guest->host notifies and stop processing the vring.
             * But if the guest has snuck in more descriptors, keep processing.
             */
            if (vring_enable_notification(s->vdev, &s->vring)) {
                break;
            }
        } else { /* head == -ENOBUFS or fatal error, iovecs[] is depleted */
            /* Since there are no iovecs[] left, stop processing for now.  Do
             * not re-enable guest->host notifies since the I/O completion
             * handler knows to check for more vring descriptors anyway.
             */
            break;
        }
    }

    num_queued = ioq_num_queued(&s->ioqueue);
    if (num_queued > 0) {
        s->num_reqs += num_queued;

        int rc = ioq_submit(&s->ioqueue);
        if (unlikely(rc < 0)) {
            fprintf(stderr, "ioq_submit failed %d\n", rc);
            exit(1);
        }
    }
}

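/* Called when Linux AIO completions are available: reap them, notify the
 * guest, and resume vring processing if requests were previously starved of
 * iovecs.
 */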
static void handle_io(EventNotifier *e)
{
    VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
                                           io_notifier);

    event_notifier_test_and_clear(&s->io_notifier);
    if (ioq_run_completion(&s->ioqueue, complete_request, s) > 0) {
        notify_guest(s);
    }

    /* If there were more requests than iovecs, the vring will not be empty yet
     * so check again.  There should now be enough resources to process more
     * requests.
     */
    if (unlikely(vring_more_avail(&s->vring))) {
        handle_notify(&s->host_notifier);
    }
}

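/* Event loop for the data plane thread: keep polling until a stop has been
 * requested and all in-flight requests have completed.
 */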
static void *data_plane_thread(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;

    do {
        aio_poll(s->ctx, true);
    } while (!s->stopping || s->num_reqs > 0);
    return NULL;
}

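/* Bottom half that spawns the data plane thread from the iothread so the new
 * thread inherits the iothread's cpuset.
 */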
static void start_data_plane_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;

    qemu_bh_delete(s->start_bh);
    s->start_bh = NULL;
    qemu_thread_create(&s->thread, data_plane_thread,
                       s, QEMU_THREAD_JOINABLE);
}

bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
                                  VirtIOBlockDataPlane **dataplane)
{
    VirtIOBlockDataPlane *s;
    int fd;

    *dataplane = NULL;

    if (!blk->data_plane) {
        return true;
    }

    if (blk->scsi) {
        error_report("device is incompatible with x-data-plane, use scsi=off");
        return false;
    }

    if (blk->config_wce) {
        error_report("device is incompatible with x-data-plane, "
                     "use config-wce=off");
        return false;
    }

    fd = raw_get_aio_fd(blk->conf.bs);
    if (fd < 0) {
        error_report("drive is incompatible with x-data-plane, "
                     "use format=raw,cache=none,aio=native");
        return false;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->fd = fd;
    s->blk = blk;

    /* Prevent block operations that conflict with data plane thread */
    bdrv_set_in_use(blk->conf.bs, 1);

    error_setg(&s->migration_blocker,
               "x-data-plane does not support migration");
    migrate_add_blocker(s->migration_blocker);

    *dataplane = s;
    return true;
}

void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    if (!s) {
        return;
    }

    virtio_blk_data_plane_stop(s);
    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);
    bdrv_set_in_use(s->blk->conf.bs, 0);
    g_free(s);
}

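/* Start the data plane: map the vring, wire up the guest and host notifiers
 * and the Linux AIO queue, then spawn the data plane thread.
 */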
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
    VirtQueue *vq;
    int i;

    if (s->started) {
        return;
    }

    vq = virtio_get_queue(s->vdev, 0);
    if (!vring_setup(&s->vring, s->vdev, 0)) {
        return;
    }

    s->ctx = aio_context_new();

    /* Set up guest notifier (irq) */
    if (s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, 1,
                                              true) != 0) {
        fprintf(stderr, "virtio-blk failed to set guest notifier, "
                "ensure -enable-kvm is set\n");
        exit(1);
    }
    s->guest_notifier = virtio_queue_get_guest_notifier(vq);

    /* Set up virtqueue notify */
    if (s->vdev->binding->set_host_notifier(s->vdev->binding_opaque,
                                            0, true) != 0) {
        fprintf(stderr, "virtio-blk failed to set host notifier\n");
        exit(1);
    }
    s->host_notifier = *virtio_queue_get_host_notifier(vq);
    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, NULL);

    /* Set up ioqueue */
    ioq_init(&s->ioqueue, s->fd, REQ_MAX);
    for (i = 0; i < ARRAY_SIZE(s->requests); i++) {
        ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
    }
    s->io_notifier = *ioq_get_notifier(&s->ioqueue);
    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, NULL);

    s->started = true;
    trace_virtio_blk_data_plane_start(s);

    /* Kick right away to begin processing requests already in vring */
    event_notifier_set(virtio_queue_get_host_notifier(vq));

    /* Spawn thread in BH so it inherits iothread cpusets */
    s->start_bh = qemu_bh_new(start_data_plane_bh, s);
    qemu_bh_schedule(s->start_bh);
}

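/* Stop the data plane: drain in-flight requests, tear down the notifiers,
 * and release the vring and AioContext.
 */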
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
    if (!s->started || s->stopping) {
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    /* Stop thread or cancel pending thread creation BH */
    if (s->start_bh) {
        qemu_bh_delete(s->start_bh);
        s->start_bh = NULL;
    } else {
        aio_notify(s->ctx);
        qemu_thread_join(&s->thread);
    }

    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL);
    ioq_cleanup(&s->ioqueue);

    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL);
    s->vdev->binding->set_host_notifier(s->vdev->binding_opaque, 0, false);

    aio_context_unref(s->ctx);

    /* Clean up guest notifier (irq) */
    s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, 1, false);

    vring_teardown(&s->vring);
    s->started = false;
    s->stopping = false;
}