qemu.git blob 192e81746f23df6eab5ee789e0c5e2bc0b159ca4: hw/xen_disk.c
/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

/* tunables: syncwrite forces a flush after each write request,
 * batch_maps maps all grants of a request with one xc_gnttab call,
 * and max_requests caps the number of concurrently processed requests */
static int syncwrite = 0;
static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

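/*
 * Geometry notes: blkif sectors are fixed 512-byte units (BLOCK_SIZE),
 * independent of the backing image.  A request carries at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments, each confined to a single
 * granted page (see the page-crossing check in ioreq_parse), so one
 * request never spans more than that many pages of payload.
 */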
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)  list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

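/*
 * ioreq life cycle: ioreq_start() takes a request from the freelist
 * (or allocates a new one, up to max_requests) and puts it on the
 * inflight list; ioreq_finish() moves it to the finished list once all
 * I/O has completed; ioreq_release() wipes it and returns it to the
 * freelist after the response has been sent.
 */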
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    QEMUIOVector v = ioreq->v;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    /* keep the initialized iovec so its backing array is not leaked by
     * the memset; ioreq_start() resets it before reuse */
    ioreq->v = v;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite) {
            ioreq->presync = ioreq->postsync = 1;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite) {
            ioreq->postsync = 1;
        }
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

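        /* iov_base temporarily holds just the byte offset within the
         * (not yet mapped) grant page; ioreq_map() rebases it onto the
         * mapped page's address */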
        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

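/*
 * Map the request's grant references into our address space.  With
 * batch_maps set, all refs are mapped by one xc_gnttab_map_grant_refs
 * call into a contiguous region (ioreq->pages); otherwise each ref
 * gets its own mapping in ioreq->page[i].
 */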
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

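/*
 * Completion callback, doubling as a reference counter for the
 * request: aio_inflight is bumped once up front and once per submitted
 * aio operation, and each call here drops it; only when it reaches
 * zero is the request unmapped and finished.
 */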
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

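/*
 * Submit the parsed request to the qemu block layer.  Barrier
 * semantics (presync/postsync) are implemented with synchronous
 * bdrv_flush() calls around the asynchronous I/O; the extra
 * aio_inflight reference taken here is dropped by the final
 * qemu_aio_complete(ioreq, 0), so the request cannot complete while
 * it is still being submitted.
 */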
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

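/*
 * Copy one response onto the shared ring, using the layout the
 * frontend's ABI expects, and let the ring macros work out whether an
 * event-channel notification is actually required.
 */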
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        /* unreachable: blk_connect() only ever selects one of the three
         * protocols above; bail out rather than memcpy through NULL */
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

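/*
 * 32-bit and 64-bit x86 guests lay out blkif_request_t with different
 * alignment, so requests from foreign-ABI rings are converted to the
 * native layout before processing.
 */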
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

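/*
 * Main consumer loop, run from the bottom half: first flush out any
 * finished responses, then pull requests between the consumer index
 * and the snapshotted producer index.  If the ioreq pool is exhausted
 * (max_requests), more_work makes the loop reschedule itself.
 */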
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

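/*
 * Read the backend configuration from xenstore; the nodes typically
 * live under /local/domain/0/backend/vbd/<frontend-domid>/<devno>/:
 *   params       "<format>:<filename>", e.g. "raw:/srv/guest.img"
 *   mode         "w" for read-write, anything else means read-only
 *   type         backend type, e.g. "phy"
 *   dev          frontend device name, e.g. "xvda"
 *   device-type  "disk" or "cdrom"
 * Then open the image via the qemu block layer and publish the disk
 * geometry and features back to xenstore.
 */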
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = 0;
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    /* xenbus virtual-device numbers encode the Linux xvd major (202)
     * in the high bits, with 16 minors per disk */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already set up for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

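/*
 * Connect to the frontend: it publishes the shared ring's grant
 * reference and its event-channel port in xenstore, and may request a
 * foreign ring ABI via the "protocol" node ("x86_32-abi" or
 * "x86_64-abi"); the default is the backend's native layout.
 */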
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

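/*
 * Hooks for the generic xen backend framework, invoked as the device
 * walks through its life cycle: alloc -> init (xenstore config) ->
 * initialise (connect to the frontend) -> event/disconnect -> free.
 */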
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};