/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"

/* ------------------------------------------------------------- */

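/*
 * Tunables:
 *   syncwrite    - flush after every write instead of relying on
 *                  barrier requests only
 *   batch_maps   - map all grant refs of a request with a single
 *                  xc_gnttab_map_grant_refs() call
 *   max_requests - upper bound on struct ioreq allocations per device
 *   use_aio      - submit i/o through qemu's aio layer instead of
 *                  synchronous bdrv_read/bdrv_write
 */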
static int syncwrite = 0;
static int batch_maps = 0;

static int max_requests = 32;
static int use_aio = 1;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    LIST_ENTRY(ioreq) list;
};

struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    LIST_HEAD(inflight_head, ioreq) inflight;
    LIST_HEAD(finished_head, ioreq) finished;
    LIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* qemu block driver */
    int index;
    BlockDriverState *bs;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

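/*
 * Request life cycle: a struct ioreq moves from the freelist to the
 * inflight list in ioreq_start(), to the finished list in
 * ioreq_finish() once i/o is done, and back to the freelist in
 * ioreq_release() after the response has been sent.
 */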
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (LIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests)
            goto out;
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    LIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    LIST_REMOVE(ioreq, list);
    LIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    QEMUIOVector v = ioreq->v;

    LIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    ioreq->v = v;   /* keep the iovec allocation; memset would leak it */
    LIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!syncwrite)
            ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (blkdev->mode[0] != 'w') {
            xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
            goto err;
        }
        if (syncwrite)
            ioreq->postsync = 1;
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

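/*
 * Grant mapping.  ioreq_parse() stores each segment's in-page offset
 * in iov_base; ioreq_map() maps the granted pages and turns those
 * offsets into real pointers, either with one batched
 * xc_gnttab_map_grant_refs() call or page by page.  ioreq_unmap()
 * undoes whichever variant was used.
 */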
static void ioreq_unmap(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return;
    if (batch_maps) {
        if (!ioreq->pages)
            return;
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0)
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i])
                continue;
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0)
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

static int ioreq_map(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return 0;
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++)
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc, len = 0;
    off_t pos;

    if (ioreq_map(ioreq) == -1)
        goto err;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs);

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs);
    ioreq->status = BLKIF_RSP_OKAY;

    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

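/*
 * Completion callback shared by all aio submissions of a request.
 * aio_inflight counts outstanding aio operations plus one reference
 * taken in ioreq_runio_qemu_aio() while submitting (dropped by its
 * final qemu_aio_complete(ioreq, 0) call), so the response is only
 * queued once everything has completed.
 */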
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0)
        return;

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq_map(ioreq) == -1)
        goto err;

    ioreq->aio_inflight++;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

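/*
 * Put one response on the shared ring, using the response slot at
 * rsp_prod_pvt for the negotiated ring ABI, and return whether the
 * frontend needs an event-channel notification.  Also bumps
 * more_work if further requests are already queued.
 */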
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32, blkdev->rings.x86_32.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64, blkdev->rings.x86_64.rsp_prod_pvt);
        break;
    default:
        /* unknown protocol: don't write through a NULL pointer */
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests)
        blkdev->more_work++;
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!LIST_EMPTY(&blkdev->finished)) {
        ioreq = LIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify)
        xen_be_send_notify(&blkdev->xendev);
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_32, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_64, rc));
        break;
    }
    return 0;
}

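/*
 * Main dispatch loop: drain the request ring, run each request
 * through the aio or sync path, and reschedule ourselves via the
 * bottom half if requests had to be deferred (ring overflow or no
 * free ioreq slots).
 */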
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio)
        blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
            break;
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq))
                xen_be_send_notify(&blkdev->xendev);
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio)
        blk_send_response_all(blkdev);

    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
        qemu_bh_schedule(blkdev->bh);
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    LIST_INIT(&blkdev->inflight);
    LIST_INIT(&blkdev->finished);
    LIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE)
        batch_maps = 1;
}

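/*
 * Read our own xenstore config (params, mode, type, dev,
 * device-type), open or look up the BlockDriverState, then publish
 * disk size and features (feature-barrier, info, sector-size,
 * sectors) for the frontend.
 */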
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int mode, qflags, have_barriers, info = 0;
    char *h;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        h = blkdev->params ? strchr(blkdev->params, ':') : NULL;
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (blkdev->mode == NULL)
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    if (blkdev->type == NULL)
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    if (blkdev->dev == NULL)
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    if (blkdev->devtype == NULL)
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL)
        return -1;

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        mode = O_RDWR;
        qflags = BDRV_O_RDWR;
    } else {
        mode = O_RDONLY;
        qflags = BDRV_O_RDONLY;
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom"))
        info |= VDISK_CDROM;

    /* init qemu block driver */
    /* derive disk index from the xvd device number (major 202, 16 minors per disk) */
    blkdev->index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->index = drive_get_index(IF_XEN, 0, blkdev->index);
    if (blkdev->index == -1) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open2(blkdev->bs, blkdev->filename, qflags,
                           bdrv_find_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs)
            return -1;
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = drives_table[blkdev->index].bdrv;
    }
    blkdev->file_blk = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;
}

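/*
 * The frontend is ready: fetch ring-ref and event-channel from its
 * xenstore directory, map the shared ring, initialize the ring for
 * the advertised ABI (native, 32-bit or 64-bit x86), and bind the
 * event channel.
 */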
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1)
        return -1;

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring)
        return -1;
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_32, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.x86_64, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (blkdev->index == -1) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!LIST_EMPTY(&blkdev->freelist)) {
        ioreq = LIST_FIRST(&blkdev->freelist);
        LIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

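/*
 * Entry points for the generic xen backend core.
 * DEVOPS_FLAG_NEED_GNTDEV asks the core to open a grant-table
 * handle (xendev.gnttabdev) for us; it is used for all grant
 * mappings above.
 */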
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};