/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;
static int use_aio      = 1;

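/*
 * Tunables: syncwrite forces a flush after each write, batch_maps
 * maps all grant pages of a request with a single hypercall (enabled
 * in blk_alloc() when not running in emulation mode), max_requests
 * caps the number of ioreq structs kept per device, and use_aio
 * selects the asynchronous I/O path over the synchronous one.
 */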
/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
};

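/*
 * Request lifecycle: ioreq_start() takes an ioreq from the freelist
 * (or allocates a new one, up to max_requests) and puts it on the
 * inflight list; ioreq_finish() moves it to the finished list once
 * I/O is done; ioreq_release() wipes it and returns it to the
 * freelist after the response has been pushed to the frontend.
 */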
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite) {
            ioreq->presync = ioreq->postsync = 1;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite) {
            ioreq->postsync = 1;
        }
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

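/*
 * Example of the segment arithmetic above: with file_blk = BLOCK_SIZE
 * = 512 and XC_PAGE_SIZE = 4096, a segment with first_sect = 2 and
 * last_sect = 5 yields an in-page offset of 2 * 512 = 1024 bytes and
 * a length of (5 - 2 + 1) * 512 = 2048 bytes.  Note that iov_base
 * temporarily holds that page offset, not a pointer; ioreq_map()
 * turns it into a real address once the grant page is mapped.
 */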
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

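/*
 * Grant mapping strategy: with batch_maps set, all pages of a request
 * are mapped by a single xc_gnttab_map_grant_refs() call into one
 * contiguous region (ioreq->pages) and must be unmapped as a batch,
 * too.  Otherwise each grant ref gets its own map/unmap call and its
 * own slot in ioreq->page[].  cnt_map counts mapped pages per device,
 * mainly for the diagnostics printed on failure.
 */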
static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc;
    off_t pos;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs);
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs);
    }
    ioreq->status = BLKIF_RSP_OKAY;

    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

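/*
 * aio_inflight works as a reference count: ioreq_runio_qemu_aio()
 * takes one reference up front, adds one per submitted aio request,
 * and drops its own reference via the final qemu_aio_complete(ioreq, 0)
 * call.  Only when the count reaches zero is the request unmapped,
 * finished, and the response bottom half scheduled.
 */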
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

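/*
 * Note that event channel notifications are coalesced here: a single
 * xen_be_send_notify() covers the whole batch of responses, and
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() in blk_send_response_one()
 * only asks for a notify when the frontend actually needs one.
 */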
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

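/*
 * The x86_32 and x86_64 frontend ABIs lay out blkif_request_t with
 * different alignment/padding than the backend's native struct, so
 * blkif_get_x86_32_req()/blkif_get_x86_64_req() convert the request
 * into the native format while copying it off the shared ring.
 */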
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

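/*
 * The bottom half drives request processing: blk_handle_requests()
 * drains the ring until it is empty or max_requests ioreqs are in
 * flight.  If work is left over (more_work), the bottom half is
 * rescheduled right away when there is room, and qemu_aio_complete()
 * reschedules it whenever an in-flight request completes.
 */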
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, have_barriers, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = 0;
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    qemu_free(blkdev->params);
    blkdev->params = NULL;
    qemu_free(blkdev->mode);
    blkdev->mode = NULL;
    qemu_free(blkdev->type);
    blkdev->type = NULL;
    qemu_free(blkdev->dev);
    blkdev->dev = NULL;
    qemu_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

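/*
 * On the index math in blk_init(): Linux exposes xvd disks under
 * block major 202 with 16 minors per disk, and the virtual-device
 * number on the xenbus encodes major << 8 | minor, so
 * (dev - 202 * 256) / 16 recovers the disk index used to look up an
 * IF_XEN drive given on the qemu command line.
 */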
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

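/*
 * Connect handshake: the frontend publishes "ring-ref" (a grant
 * reference for the shared ring page) and "event-channel" in its
 * xenstore directory.  The backend maps the ring through the grant
 * table, initializes its private ring state in the layout matching
 * the negotiated protocol, and binds the event channel that will
 * trigger blk_event().
 */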
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};
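/*
 * xen_blkdev_ops plugs this backend into the generic xenbus state
 * machine in xen_backend.c -- roughly: alloc when the backend device
 * shows up in xenstore, init to read the static configuration,
 * connect once the frontend is ready, and disconnect/free to tear
 * everything down.  DEVOPS_FLAG_NEED_GNTDEV asks the core to open a
 * grant-table handle (xendev.gnttabdev) before connecting.
 */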