/*
 * Copyright (c) 2018  Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

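/*
 * State for a single block request, taken off the shared ring and kept
 * on one of the per-device lists (inflight, finished, freelist) below.
 */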
struct ioreq {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

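/*
 * Per-device dataplane state: the mapped shared ring, the event channel,
 * the request lists and the AioContext (main loop or IOThread) in which
 * all I/O for this device is processed.
 */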
struct XenBlkDev {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;
    unsigned int max_requests;
    BlockBackend *blk;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

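/* Clear a request so it can be reused from the freelist. */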
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->buf = NULL;
    ioreq->size = 0;
    ioreq->presync = 0;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

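/*
 * Get a request slot: reuse one from the freelist, or allocate a new one
 * as long as fewer than max_requests have been created. Returns NULL if
 * the pool is exhausted; the caller defers the work and retries from the
 * bottom half.
 */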
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= blkdev->max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, 1);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

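/* Move a completed request from the inflight list to the finished list. */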
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

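/*
 * Return a request to the freelist, either from the finished list
 * (finish == true) or straight from the inflight list.
 */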
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    size_t len;
    int i;

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ &&
        blk_is_read_only(blkdev->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * blkdev->file_blk >= XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (ioreq->req.seg[i].last_sect -
               ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        ioreq->size += len;
    }
    if (ioreq->start + ioreq->size > blkdev->file_size) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

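/*
 * Copy data between the guest's granted pages and the local bounce buffer
 * (ioreq->buf), in the direction implied by the operation: towards the
 * guest for reads, from the guest for writes and flushes.
 */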
static int ioreq_grant_copy(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    XenDevice *xendev = blkdev->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    int64_t file_blk = blkdev->file_blk;
    bool to_domain = (ioreq->req.operation == BLKIF_OP_READ);
    void *virt = ioreq->buf;
    Error *local_err = NULL;

    if (ioreq->req.nr_segments == 0) {
        return 0;
    }

    count = ioreq->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect *
                file_blk;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect *
                file_blk;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect -
                       ioreq->req.seg[i].first_sect + 1) * file_blk;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        ioreq->aio_errors++;
        return -1;
    }

    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

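/*
 * Completion callback for all AIO issued below. Runs with the device's
 * AioContext acquired; once the last outstanding piece of AIO for a
 * request has completed, it copies read data back to the guest, updates
 * accounting, moves the request to the finished list and schedules the
 * bottom half so the response can be pushed onto the ring.
 */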
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;
    struct XenBlkDev *blkdev = ioreq->blkdev;

    aio_context_acquire(blkdev->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     ioreq->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        goto done;
    }
    if (ioreq->aio_inflight > 0) {
        goto done;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure ioreq->aio_errors is increased */
        if (ret == 0) {
            ioreq_grant_copy(ioreq);
        }
        qemu_vfree(ioreq->buf);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        qemu_vfree(ioreq->buf);
        break;
    default:
        break;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_finish(ioreq);

    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(blkdev->bh);

done:
    aio_context_release(blkdev->ctx);
}

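/*
 * A discard may cover more than BDRV_REQUEST_MAX_SECTORS, so split it
 * into appropriately sized blk_aio_pdiscard() calls. Returns false if
 * the sector range would wrap or overflow the byte-based limit.
 */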
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / blkdev->file_blk) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS * blkdev->file_blk;
    byte_offset = sec_start * blkdev->file_blk;
    byte_remaining = sec_count * blkdev->file_blk;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

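/*
 * Issue the actual I/O for a parsed request: grant-copy write data into
 * the bounce buffer, then submit the read/write/flush/discard AIO via
 * the BlockBackend. Completion is driven by qemu_aio_complete() once
 * every piece of submitted AIO has finished.
 */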
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
    if (ioreq->req.nr_segments &&
        (ioreq->req.operation == BLKIF_OP_WRITE ||
         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        ioreq_grant_copy(ioreq)) {
        qemu_vfree(ioreq->buf);
        goto err;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

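/*
 * Write the response for a single request onto the ring in the layout
 * expected by the frontend's protocol. Returns non-zero if the frontend
 * needs to be notified via the event channel.
 */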
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &blkdev->rings.native,
            blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &blkdev->rings.x86_32_part,
            blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &blkdev->rings.x86_64_part,
            blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = ioreq->req.id;
    resp->operation = ioreq->req.operation;
    resp->status = ioreq->status;

    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(blkdev->xendev,
                                        blkdev->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

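/*
 * Copy the request at ring index 'rc' off the shared ring, converting
 * from the 32/64-bit x86 layouts to the native one where necessary.
 */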
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
                           RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

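/*
 * Main request loop: flush out finished responses, then pull requests
 * off the ring, parse them and submit their I/O. If work had to be
 * deferred (no free request slots, or new requests arrived while
 * responding), the bottom half is rescheduled.
 */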
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                /* fall through */
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(blkdev->xendev,
                                                blkdev->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

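/* Bottom half: process the ring in the device's AioContext. */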
static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;

    aio_context_acquire(blkdev->ctx);
    blk_handle_requests(blkdev);
    aio_context_release(blkdev->ctx);
}

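/* Event channel callback: defer ring processing to the bottom half. */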
static void blk_event(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;

    qemu_bh_schedule(blkdev->bh);
}

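/*
 * Create the dataplane for a xen-block device. I/O runs in the given
 * IOThread's AioContext if one is supplied, otherwise in the main loop.
 * The expected call sequence (driven by the xen-block device code) is
 * presumably create -> start -> stop -> destroy, with start/stop possibly
 * repeated across frontend reconnections.
 */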
struct XenBlkDev *xen_block_dataplane_create(XenDevice *xendev,
                                             BlockConf *conf,
                                             IOThread *iothread)
{
    struct XenBlkDev *blkdev = g_new0(struct XenBlkDev, 1);

    blkdev->xendev = xendev;
    blkdev->file_blk = conf->logical_block_size;
    blkdev->blk = conf->blk;
    blkdev->file_size = blk_getlength(blkdev->blk);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);

    if (iothread) {
        blkdev->iothread = iothread;
        object_ref(OBJECT(blkdev->iothread));
        blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
    } else {
        blkdev->ctx = qemu_get_aio_context();
    }
    blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);

    return blkdev;
}

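/* Free the request pool, the bottom half and the dataplane itself. */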
void xen_block_dataplane_destroy(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;

    if (!blkdev) {
        return;
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    qemu_bh_delete(blkdev->bh);
    if (blkdev->iothread) {
        object_unref(OBJECT(blkdev->iothread));
    }

    g_free(blkdev);
}

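/*
 * Tear down the active connection: move the BlockBackend back to the
 * main loop's AioContext, unbind the event channel and unmap the shared
 * ring. Safe to call on a dataplane that was never fully started.
 */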
void xen_block_dataplane_stop(struct XenBlkDev *blkdev)
{
    XenDevice *xendev;

    if (!blkdev) {
        return;
    }

    aio_context_acquire(blkdev->ctx);
    blk_set_aio_context(blkdev->blk, qemu_get_aio_context());
    aio_context_release(blkdev->ctx);

    xendev = blkdev->xendev;

    if (blkdev->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, blkdev->event_channel,
                                        &local_err);
        blkdev->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (blkdev->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, blkdev->sring,
                                    blkdev->nr_ring_ref, &local_err);
        blkdev->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(blkdev->ring_ref);
    blkdev->ring_ref = NULL;
}

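/*
 * Connect to the frontend: record the ring grant references, size the
 * request pool from the ring size, map the shared ring, bind the event
 * channel and move the BlockBackend into the dataplane's AioContext.
 * On failure the error is propagated and anything already set up is
 * undone via xen_block_dataplane_stop().
 */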
void xen_block_dataplane_start(struct XenBlkDev *blkdev,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    XenDevice *xendev = blkdev->xendev;
    Error *local_err = NULL;
    unsigned int ring_size;
    unsigned int i;

    blkdev->nr_ring_ref = nr_ring_ref;
    blkdev->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        blkdev->ring_ref[i] = ring_ref[i];
    }

    blkdev->protocol = protocol;

    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", blkdev->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, blkdev->nr_ring_ref,
                                  &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    blkdev->sring = xen_device_map_grant_refs(xendev,
                                              blkdev->ring_ref,
                                              blkdev->nr_ring_ref,
                                              PROT_READ | PROT_WRITE,
                                              &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    blkdev->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      blk_event, blkdev,
                                      &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    aio_context_acquire(blkdev->ctx);
    blk_set_aio_context(blkdev->blk, blkdev->ctx);
    aio_context_release(blkdev->ctx);
    return;

stop:
    xen_block_dataplane_stop(blkdev);
}