1 /*
2 * xen paravirt block device backend
3 *
4 * (c) Gerd Hoffmann <kraxel@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; under version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 *
18 * Contributions after 2012-01-13 are licensed under the terms of the
19 * GNU GPL, version 2 or (at your option) any later version.
20 */
21
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
24 #include <sys/mman.h>
25 #include <sys/uio.h>
26
27 #include "hw/hw.h"
28 #include "hw/xen/xen_backend.h"
29 #include "xen_blkif.h"
30 #include "sysemu/blockdev.h"
31 #include "sysemu/block-backend.h"
32 #include "qapi/qmp/qdict.h"
33 #include "qapi/qmp/qstring.h"
34
35 /* ------------------------------------------------------------- */
36
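/*
 * Note: batch_maps selects whether the grants of a request are mapped with
 * a single xengnttab_map_grant_refs() call (one contiguous region) or with
 * one xengnttab_map_grant_ref() call per page; max_requests bounds the
 * number of ioreq structures allocated per device.
 */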
37 static int batch_maps = 0;
38
39 static int max_requests = 32;
40
41 /* ------------------------------------------------------------- */
42
43 #define BLOCK_SIZE 512
44 #define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
45
46 struct PersistentGrant {
47 void *page;
48 struct XenBlkDev *blkdev;
49 };
50
51 typedef struct PersistentGrant PersistentGrant;
52
53 struct PersistentRegion {
54 void *addr;
55 int num;
56 };
57
58 typedef struct PersistentRegion PersistentRegion;
59
60 struct ioreq {
61 blkif_request_t req;
62 int16_t status;
63
64 /* parsed request */
65 off_t start;
66 QEMUIOVector v;
67 int presync;
68 uint8_t mapped;
69
70 /* grant mapping */
71 uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
72 uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
73 int prot;
74 void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
75 void *pages;
76 int num_unmap;
77
78 /* aio status */
79 int aio_inflight;
80 int aio_errors;
81
82 struct XenBlkDev *blkdev;
83 QLIST_ENTRY(ioreq) list;
84 BlockAcctCookie acct;
85 };
86
87 struct XenBlkDev {
88 struct XenDevice xendev; /* must be first */
89 char *params;
90 char *mode;
91 char *type;
92 char *dev;
93 char *devtype;
94 bool directiosafe;
95 const char *fileproto;
96 const char *filename;
97 int ring_ref;
98 void *sring;
99 int64_t file_blk;
100 int64_t file_size;
101 int protocol;
102 blkif_back_rings_t rings;
103 int more_work;
104 int cnt_map;
105
106 /* request lists */
107 QLIST_HEAD(inflight_head, ioreq) inflight;
108 QLIST_HEAD(finished_head, ioreq) finished;
109 QLIST_HEAD(freelist_head, ioreq) freelist;
110 int requests_total;
111 int requests_inflight;
112 int requests_finished;
113
114 /* Persistent grants extension */
115 gboolean feature_discard;
116 gboolean feature_persistent;
117 GTree *persistent_gnts;
118 GSList *persistent_regions;
119 unsigned int persistent_gnt_count;
120 unsigned int max_grants;
121
122 /* qemu block driver */
123 DriveInfo *dinfo;
124 BlockBackend *blk;
125 QEMUBH *bh;
126 };
127
128 /* ------------------------------------------------------------- */
129
130 static void ioreq_reset(struct ioreq *ioreq)
131 {
132 memset(&ioreq->req, 0, sizeof(ioreq->req));
133 ioreq->status = 0;
134 ioreq->start = 0;
135 ioreq->presync = 0;
136 ioreq->mapped = 0;
137
138 memset(ioreq->domids, 0, sizeof(ioreq->domids));
139 memset(ioreq->refs, 0, sizeof(ioreq->refs));
140 ioreq->prot = 0;
141 memset(ioreq->page, 0, sizeof(ioreq->page));
142 ioreq->pages = NULL;
143
144 ioreq->aio_inflight = 0;
145 ioreq->aio_errors = 0;
146
147 ioreq->blkdev = NULL;
148 memset(&ioreq->list, 0, sizeof(ioreq->list));
149 memset(&ioreq->acct, 0, sizeof(ioreq->acct));
150
151 qemu_iovec_reset(&ioreq->v);
152 }
153
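/*
 * Key comparison function for the persistent grant tree; keys are grant
 * references stored as pointers via GUINT_TO_POINTER().
 */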
154 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
155 {
156 uint ua = GPOINTER_TO_UINT(a);
157 uint ub = GPOINTER_TO_UINT(b);
158 return (ua > ub) - (ua < ub);
159 }
160
161 static void destroy_grant(gpointer pgnt)
162 {
163 PersistentGrant *grant = pgnt;
164 xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
165
166 if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
167 xen_be_printf(&grant->blkdev->xendev, 0,
168 "xengnttab_unmap failed: %s\n",
169 strerror(errno));
170 }
171 grant->blkdev->persistent_gnt_count--;
172 xen_be_printf(&grant->blkdev->xendev, 3,
173 "unmapped grant %p\n", grant->page);
174 g_free(grant);
175 }
176
177 static void remove_persistent_region(gpointer data, gpointer dev)
178 {
179 PersistentRegion *region = data;
180 struct XenBlkDev *blkdev = dev;
181 xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
182
183 if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
184 xen_be_printf(&blkdev->xendev, 0,
185 "xengnttab_unmap region %p failed: %s\n",
186 region->addr, strerror(errno));
187 }
188 xen_be_printf(&blkdev->xendev, 3,
189 "unmapped grant region %p with %d pages\n",
190 region->addr, region->num);
191 g_free(region);
192 }
193
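/*
 * ioreq lifecycle helpers: ioreq_start() pulls a request off the freelist
 * (or allocates a new one while requests_total is below max_requests),
 * ioreq_finish() moves a completed request onto the finished list, and
 * ioreq_release() resets it and returns it to the freelist.
 */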
194 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
195 {
196 struct ioreq *ioreq = NULL;
197
198 if (QLIST_EMPTY(&blkdev->freelist)) {
199 if (blkdev->requests_total >= max_requests) {
200 goto out;
201 }
202 /* allocate new struct */
203 ioreq = g_malloc0(sizeof(*ioreq));
204 ioreq->blkdev = blkdev;
205 blkdev->requests_total++;
206 qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
207 } else {
208 /* get one from freelist */
209 ioreq = QLIST_FIRST(&blkdev->freelist);
210 QLIST_REMOVE(ioreq, list);
211 }
212 QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
213 blkdev->requests_inflight++;
214
215 out:
216 return ioreq;
217 }
218
219 static void ioreq_finish(struct ioreq *ioreq)
220 {
221 struct XenBlkDev *blkdev = ioreq->blkdev;
222
223 QLIST_REMOVE(ioreq, list);
224 QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
225 blkdev->requests_inflight--;
226 blkdev->requests_finished++;
227 }
228
229 static void ioreq_release(struct ioreq *ioreq, bool finish)
230 {
231 struct XenBlkDev *blkdev = ioreq->blkdev;
232
233 QLIST_REMOVE(ioreq, list);
234 ioreq_reset(ioreq);
235 ioreq->blkdev = blkdev;
236 QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
237 if (finish) {
238 blkdev->requests_finished--;
239 } else {
240 blkdev->requests_inflight--;
241 }
242 }
243
244 /*
245 * translate request into iovec + start offset
246 * do sanity checks along the way
247 */
248 static int ioreq_parse(struct ioreq *ioreq)
249 {
250 struct XenBlkDev *blkdev = ioreq->blkdev;
251 uintptr_t mem;
252 size_t len;
253 int i;
254
255 xen_be_printf(&blkdev->xendev, 3,
256 "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
257 ioreq->req.operation, ioreq->req.nr_segments,
258 ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
259 switch (ioreq->req.operation) {
260 case BLKIF_OP_READ:
261 ioreq->prot = PROT_WRITE; /* to memory */
262 break;
263 case BLKIF_OP_FLUSH_DISKCACHE:
264 ioreq->presync = 1;
265 if (!ioreq->req.nr_segments) {
266 return 0;
267 }
268 /* fall through */
269 case BLKIF_OP_WRITE:
270 ioreq->prot = PROT_READ; /* from memory */
271 break;
272 case BLKIF_OP_DISCARD:
273 return 0;
274 default:
275 xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
276 ioreq->req.operation);
277 goto err;
278 }
279
280 if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
281 xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
282 goto err;
283 }
284
285 ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
286 for (i = 0; i < ioreq->req.nr_segments; i++) {
287 if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
288 xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
289 goto err;
290 }
291 if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
292 xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
293 goto err;
294 }
295 if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
296 xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
297 goto err;
298 }
299
300 ioreq->domids[i] = blkdev->xendev.dom;
301 ioreq->refs[i] = ioreq->req.seg[i].gref;
302
303 mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
304 len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
305 qemu_iovec_add(&ioreq->v, (void*)mem, len);
306 }
307 if (ioreq->start + ioreq->v.size > blkdev->file_size) {
308 xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
309 goto err;
310 }
311 return 0;
312
313 err:
314 ioreq->status = BLKIF_RSP_ERROR;
315 return -1;
316 }
317
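/*
 * Grant mapping helpers: ioreq_map() maps the frontend's granted pages
 * into our address space (batched or page by page) and may promote some
 * of them to persistent grants; ioreq_unmap() tears down only the
 * num_unmap mappings that were not made persistent.
 */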
318 static void ioreq_unmap(struct ioreq *ioreq)
319 {
320 xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
321 int i;
322
323 if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
324 return;
325 }
326 if (batch_maps) {
327 if (!ioreq->pages) {
328 return;
329 }
330 if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
331 xen_be_printf(&ioreq->blkdev->xendev, 0,
332 "xengnttab_unmap failed: %s\n",
333 strerror(errno));
334 }
335 ioreq->blkdev->cnt_map -= ioreq->num_unmap;
336 ioreq->pages = NULL;
337 } else {
338 for (i = 0; i < ioreq->num_unmap; i++) {
339 if (!ioreq->page[i]) {
340 continue;
341 }
342 if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
343 xen_be_printf(&ioreq->blkdev->xendev, 0,
344 "xengnttab_unmap failed: %s\n",
345 strerror(errno));
346 }
347 ioreq->blkdev->cnt_map--;
348 ioreq->page[i] = NULL;
349 }
350 }
351 ioreq->mapped = 0;
352 }
353
354 static int ioreq_map(struct ioreq *ioreq)
355 {
356 xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
357 uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
358 uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
359 void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
360 int i, j, new_maps = 0;
361 PersistentGrant *grant;
362 PersistentRegion *region;
363 /* The domids and refs arrays will contain the information necessary
364 * to map the grants needed to fulfill this request.
365 *
366 * After mapping the needed grants, the page array will contain the
367 * memory address of each granted page in the order specified in ioreq
368 * (regardless of whether it is a persistent grant or not).
369 */
370
371 if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
372 return 0;
373 }
374 if (ioreq->blkdev->feature_persistent) {
375 for (i = 0; i < ioreq->v.niov; i++) {
376 grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
377 GUINT_TO_POINTER(ioreq->refs[i]));
378
379 if (grant != NULL) {
380 page[i] = grant->page;
381 xen_be_printf(&ioreq->blkdev->xendev, 3,
382 "using persistent-grant %" PRIu32 "\n",
383 ioreq->refs[i]);
384 } else {
385 /* Add the grant to the list of grants that
386 * should be mapped
387 */
388 domids[new_maps] = ioreq->domids[i];
389 refs[new_maps] = ioreq->refs[i];
390 page[i] = NULL;
391 new_maps++;
392 }
393 }
394 /* Set the protection to RW, since grants may be reused later
395 * with a different protection than the one needed for this request
396 */
397 ioreq->prot = PROT_WRITE | PROT_READ;
398 } else {
399 /* All grants in the request should be mapped */
400 memcpy(refs, ioreq->refs, sizeof(refs));
401 memcpy(domids, ioreq->domids, sizeof(domids));
402 memset(page, 0, sizeof(page));
403 new_maps = ioreq->v.niov;
404 }
405
406 if (batch_maps && new_maps) {
407 ioreq->pages = xengnttab_map_grant_refs
408 (gnt, new_maps, domids, refs, ioreq->prot);
409 if (ioreq->pages == NULL) {
410 xen_be_printf(&ioreq->blkdev->xendev, 0,
411 "can't map %d grant refs (%s, %d maps)\n",
412 new_maps, strerror(errno), ioreq->blkdev->cnt_map);
413 return -1;
414 }
415 for (i = 0, j = 0; i < ioreq->v.niov; i++) {
416 if (page[i] == NULL) {
417 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
418 }
419 }
420 ioreq->blkdev->cnt_map += new_maps;
421 } else if (new_maps) {
422 for (i = 0; i < new_maps; i++) {
423 ioreq->page[i] = xengnttab_map_grant_ref
424 (gnt, domids[i], refs[i], ioreq->prot);
425 if (ioreq->page[i] == NULL) {
426 xen_be_printf(&ioreq->blkdev->xendev, 0,
427 "can't map grant ref %d (%s, %d maps)\n",
428 refs[i], strerror(errno), ioreq->blkdev->cnt_map);
429 ioreq->mapped = 1;
430 ioreq_unmap(ioreq);
431 return -1;
432 }
433 ioreq->blkdev->cnt_map++;
434 }
435 for (i = 0, j = 0; i < ioreq->v.niov; i++) {
436 if (page[i] == NULL) {
437 page[i] = ioreq->page[j++];
438 }
439 }
440 }
441 if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
442 (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
443 ioreq->blkdev->max_grants))) {
444 /*
445 * If we are using persistent grants and batch mappings, only
446 * add the new maps to the list of persistent grants if the whole
447 * area can be persistently mapped.
448 */
449 if (batch_maps) {
450 region = g_malloc0(sizeof(*region));
451 region->addr = ioreq->pages;
452 region->num = new_maps;
453 ioreq->blkdev->persistent_regions = g_slist_append(
454 ioreq->blkdev->persistent_regions,
455 region);
456 }
457 while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
458 && new_maps) {
459 /* Go through the list of newly mapped grants and add as many
460 * as possible to the list of persistently mapped grants.
461 *
462 * Since we start at the end of ioreq->page(s), we only need
463 * to decrease new_maps to prevent these granted pages from
464 * being unmapped in ioreq_unmap.
465 */
466 grant = g_malloc0(sizeof(*grant));
467 new_maps--;
468 if (batch_maps) {
469 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
470 } else {
471 grant->page = ioreq->page[new_maps];
472 }
473 grant->blkdev = ioreq->blkdev;
474 xen_be_printf(&ioreq->blkdev->xendev, 3,
475 "adding grant %" PRIu32 " page: %p\n",
476 refs[new_maps], grant->page);
477 g_tree_insert(ioreq->blkdev->persistent_gnts,
478 GUINT_TO_POINTER(refs[new_maps]),
479 grant);
480 ioreq->blkdev->persistent_gnt_count++;
481 }
482 assert(!batch_maps || new_maps == 0);
483 }
484 for (i = 0; i < ioreq->v.niov; i++) {
485 ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
486 }
487 ioreq->mapped = 1;
488 ioreq->num_unmap = new_maps;
489 return 0;
490 }
491
492 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
493
494 static void qemu_aio_complete(void *opaque, int ret)
495 {
496 struct ioreq *ioreq = opaque;
497
498 if (ret != 0) {
499 xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
500 ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
501 ioreq->aio_errors++;
502 }
503
504 ioreq->aio_inflight--;
505 if (ioreq->presync) {
506 ioreq->presync = 0;
507 ioreq_runio_qemu_aio(ioreq);
508 return;
509 }
510 if (ioreq->aio_inflight > 0) {
511 return;
512 }
513
514 ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
515 ioreq_unmap(ioreq);
516 ioreq_finish(ioreq);
517 switch (ioreq->req.operation) {
518 case BLKIF_OP_WRITE:
519 case BLKIF_OP_FLUSH_DISKCACHE:
520 if (!ioreq->req.nr_segments) {
521 break;
522 }
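/* fall through */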
523 case BLKIF_OP_READ:
524 if (ioreq->status == BLKIF_RSP_OKAY) {
525 block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
526 } else {
527 block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
528 }
529 break;
530 case BLKIF_OP_DISCARD:
531 default:
532 break;
533 }
534 qemu_bh_schedule(ioreq->blkdev->bh);
535 }
536
537 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
538 {
539 struct XenBlkDev *blkdev = ioreq->blkdev;
540
541 if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
542 goto err_no_map;
543 }
544
545 ioreq->aio_inflight++;
546 if (ioreq->presync) {
547 blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
548 return 0;
549 }
550
551 switch (ioreq->req.operation) {
552 case BLKIF_OP_READ:
553 block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
554 ioreq->v.size, BLOCK_ACCT_READ);
555 ioreq->aio_inflight++;
556 blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
557 &ioreq->v, ioreq->v.size / BLOCK_SIZE,
558 qemu_aio_complete, ioreq);
559 break;
560 case BLKIF_OP_WRITE:
561 case BLKIF_OP_FLUSH_DISKCACHE:
562 if (!ioreq->req.nr_segments) {
563 break;
564 }
565
566 block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
567 ioreq->v.size,
568 ioreq->req.operation == BLKIF_OP_WRITE ?
569 BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
570 ioreq->aio_inflight++;
571 blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
572 &ioreq->v, ioreq->v.size / BLOCK_SIZE,
573 qemu_aio_complete, ioreq);
574 break;
575 case BLKIF_OP_DISCARD:
576 {
577 struct blkif_request_discard *discard_req = (void *)&ioreq->req;
578 ioreq->aio_inflight++;
579 blk_aio_discard(blkdev->blk,
580 discard_req->sector_number, discard_req->nr_sectors,
581 qemu_aio_complete, ioreq);
582 break;
583 }
584 default:
585 /* unknown operation (shouldn't happen -- parse catches this) */
586 goto err;
587 }
588
589 qemu_aio_complete(ioreq, 0);
590
591 return 0;
592
593 err:
594 ioreq_unmap(ioreq);
595 err_no_map:
596 ioreq_finish(ioreq);
597 ioreq->status = BLKIF_RSP_ERROR;
598 return -1;
599 }
600
601 static int blk_send_response_one(struct ioreq *ioreq)
602 {
603 struct XenBlkDev *blkdev = ioreq->blkdev;
604 int send_notify = 0;
605 int have_requests = 0;
606 blkif_response_t resp;
607 void *dst;
608
609 resp.id = ioreq->req.id;
610 resp.operation = ioreq->req.operation;
611 resp.status = ioreq->status;
612
613 /* Place on the response ring for the relevant domain. */
614 switch (blkdev->protocol) {
615 case BLKIF_PROTOCOL_NATIVE:
616 dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
617 break;
618 case BLKIF_PROTOCOL_X86_32:
619 dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
620 blkdev->rings.x86_32_part.rsp_prod_pvt);
621 break;
622 case BLKIF_PROTOCOL_X86_64:
623 dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
624 blkdev->rings.x86_64_part.rsp_prod_pvt);
625 break;
626 default:
627 dst = NULL;
628 return 0;
629 }
630 memcpy(dst, &resp, sizeof(resp));
631 blkdev->rings.common.rsp_prod_pvt++;
632
633 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
634 if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
635 /*
636 * Tail check for pending requests. Allows frontend to avoid
637 * notifications if requests are already in flight (lowers
638 * overheads and promotes batching).
639 */
640 RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
641 } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
642 have_requests = 1;
643 }
644
645 if (have_requests) {
646 blkdev->more_work++;
647 }
648 return send_notify;
649 }
650
651 /* walk finished list, send outstanding responses, free requests */
652 static void blk_send_response_all(struct XenBlkDev *blkdev)
653 {
654 struct ioreq *ioreq;
655 int send_notify = 0;
656
657 while (!QLIST_EMPTY(&blkdev->finished)) {
658 ioreq = QLIST_FIRST(&blkdev->finished);
659 send_notify += blk_send_response_one(ioreq);
660 ioreq_release(ioreq, true);
661 }
662 if (send_notify) {
663 xen_be_send_notify(&blkdev->xendev);
664 }
665 }
666
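/*
 * Copy the request at ring index rc off the shared ring, converting the
 * 32-bit and 64-bit x86 layouts to the native blkif_request_t as needed.
 */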
667 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
668 {
669 switch (blkdev->protocol) {
670 case BLKIF_PROTOCOL_NATIVE:
671 memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
672 sizeof(ioreq->req));
673 break;
674 case BLKIF_PROTOCOL_X86_32:
675 blkif_get_x86_32_req(&ioreq->req,
676 RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
677 break;
678 case BLKIF_PROTOCOL_X86_64:
679 blkif_get_x86_64_req(&ioreq->req,
680 RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
681 break;
682 }
683 return 0;
684 }
685
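/*
 * Main request loop, run from the bottom half: flush finished responses,
 * then consume new requests from the ring, parse them and submit the
 * resulting AIO; the bh is rescheduled whenever more work remains.
 */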
686 static void blk_handle_requests(struct XenBlkDev *blkdev)
687 {
688 RING_IDX rc, rp;
689 struct ioreq *ioreq;
690
691 blkdev->more_work = 0;
692
693 rc = blkdev->rings.common.req_cons;
694 rp = blkdev->rings.common.sring->req_prod;
695 xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
696
697 blk_send_response_all(blkdev);
698 while (rc != rp) {
699 /* pull request from ring */
700 if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
701 break;
702 }
703 ioreq = ioreq_start(blkdev);
704 if (ioreq == NULL) {
705 blkdev->more_work++;
706 break;
707 }
708 blk_get_request(blkdev, ioreq, rc);
709 blkdev->rings.common.req_cons = ++rc;
710
711 /* parse them */
712 if (ioreq_parse(ioreq) != 0) {
713
714 switch (ioreq->req.operation) {
715 case BLKIF_OP_READ:
716 block_acct_invalid(blk_get_stats(blkdev->blk),
717 BLOCK_ACCT_READ);
718 break;
719 case BLKIF_OP_WRITE:
720 block_acct_invalid(blk_get_stats(blkdev->blk),
721 BLOCK_ACCT_WRITE);
722 break;
723 case BLKIF_OP_FLUSH_DISKCACHE:
724 block_acct_invalid(blk_get_stats(blkdev->blk),
725 BLOCK_ACCT_FLUSH);
726 default:
727 break;
728 }
729
730 if (blk_send_response_one(ioreq)) {
731 xen_be_send_notify(&blkdev->xendev);
732 }
733 ioreq_release(ioreq, false);
734 continue;
735 }
736
737 ioreq_runio_qemu_aio(ioreq);
738 }
739
740 if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
741 qemu_bh_schedule(blkdev->bh);
742 }
743 }
744
745 /* ------------------------------------------------------------- */
746
747 static void blk_bh(void *opaque)
748 {
749 struct XenBlkDev *blkdev = opaque;
750 blk_handle_requests(blkdev);
751 }
752
753 /*
754 * We need to account for the grant allocations requiring contiguous
755 * chunks; the worst case number would be
756 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
757 * but in order to keep things simple just use
758 * 2 * max_req * max_seg.
759 */
760 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
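/*
 * For example, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11
 * and max_requests of 32: the exact worst case above would be
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound used
 * here reserves 2 * 32 * 11 = 704.
 */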
761
762 static void blk_alloc(struct XenDevice *xendev)
763 {
764 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
765
766 QLIST_INIT(&blkdev->inflight);
767 QLIST_INIT(&blkdev->finished);
768 QLIST_INIT(&blkdev->freelist);
769 blkdev->bh = qemu_bh_new(blk_bh, blkdev);
770 if (xen_mode != XEN_EMULATE) {
771 batch_maps = 1;
772 }
773 if (xengnttab_set_max_grants(xendev->gnttabdev,
774 MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
775 xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
776 strerror(errno));
777 }
778 }
779
780 static void blk_parse_discard(struct XenBlkDev *blkdev)
781 {
782 int enable;
783
784 blkdev->feature_discard = true;
785
786 if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
787 blkdev->feature_discard = !!enable;
788 }
789
790 if (blkdev->feature_discard) {
791 xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
792 }
793 }
794
795 static int blk_init(struct XenDevice *xendev)
796 {
797 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
798 int info = 0;
799 char *directiosafe = NULL;
800
801 /* read xenstore entries */
802 if (blkdev->params == NULL) {
803 char *h = NULL;
804 blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
805 if (blkdev->params != NULL) {
806 h = strchr(blkdev->params, ':');
807 }
808 if (h != NULL) {
809 blkdev->fileproto = blkdev->params;
810 blkdev->filename = h+1;
811 *h = 0;
812 } else {
813 blkdev->fileproto = "<unset>";
814 blkdev->filename = blkdev->params;
815 }
816 }
817 if (!strcmp("aio", blkdev->fileproto)) {
818 blkdev->fileproto = "raw";
819 }
820 if (!strcmp("vhd", blkdev->fileproto)) {
821 blkdev->fileproto = "vpc";
822 }
823 if (blkdev->mode == NULL) {
824 blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
825 }
826 if (blkdev->type == NULL) {
827 blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
828 }
829 if (blkdev->dev == NULL) {
830 blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
831 }
832 if (blkdev->devtype == NULL) {
833 blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
834 }
835 directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
836 blkdev->directiosafe = (directiosafe && atoi(directiosafe));
837
838 /* do we have all we need? */
839 if (blkdev->params == NULL ||
840 blkdev->mode == NULL ||
841 blkdev->type == NULL ||
842 blkdev->dev == NULL) {
843 goto out_error;
844 }
845
846 /* read-only ? */
847 if (strcmp(blkdev->mode, "w")) {
848 info |= VDISK_READONLY;
849 }
850
851 /* cdrom ? */
852 if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
853 info |= VDISK_CDROM;
854 }
855
856 blkdev->file_blk = BLOCK_SIZE;
857
858 /* fill info
859 * blk_connect supplies sector-size and sectors
860 */
861 xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
862 xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
863 xenstore_write_be_int(&blkdev->xendev, "info", info);
864
865 blk_parse_discard(blkdev);
866
867 g_free(directiosafe);
868 return 0;
869
870 out_error:
871 g_free(blkdev->params);
872 blkdev->params = NULL;
873 g_free(blkdev->mode);
874 blkdev->mode = NULL;
875 g_free(blkdev->type);
876 blkdev->type = NULL;
877 g_free(blkdev->dev);
878 blkdev->dev = NULL;
879 g_free(blkdev->devtype);
880 blkdev->devtype = NULL;
881 g_free(directiosafe);
882 blkdev->directiosafe = false;
883 return -1;
884 }
885
886 static int blk_connect(struct XenDevice *xendev)
887 {
888 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
889 int pers, index, qflags;
890 bool readonly = true;
891
892 /* read-only ? */
893 if (blkdev->directiosafe) {
894 qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
895 } else {
896 qflags = BDRV_O_CACHE_WB;
897 }
898 if (strcmp(blkdev->mode, "w") == 0) {
899 qflags |= BDRV_O_RDWR;
900 readonly = false;
901 }
902 if (blkdev->feature_discard) {
903 qflags |= BDRV_O_UNMAP;
904 }
905
906 /* init qemu block driver */
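/* xendev.dev holds the Linux device number chosen by the toolstack;
 * Xen virtual disks (xvd) use major 202 with 16 minors per disk, which
 * the arithmetic below inverts to get the disk index for drive_get(). */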
907 index = (blkdev->xendev.dev - 202 * 256) / 16;
908 blkdev->dinfo = drive_get(IF_XEN, 0, index);
909 if (!blkdev->dinfo) {
910 Error *local_err = NULL;
911 QDict *options = NULL;
912
913 if (strcmp(blkdev->fileproto, "<unset>")) {
914 options = qdict_new();
915 qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
916 }
917
918 /* setup via xenbus -> create new block driver instance */
919 xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
920 blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
921 qflags, &local_err);
922 if (!blkdev->blk) {
923 xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
924 error_get_pretty(local_err));
925 error_free(local_err);
926 return -1;
927 }
928 } else {
929 /* setup via qemu cmdline -> already setup for us */
930 xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
931 blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
932 if (blk_is_read_only(blkdev->blk) && !readonly) {
933 xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive\n");
934 blkdev->blk = NULL;
935 return -1;
936 }
937 /* blkdev->blk was not created by us; take a reference
938 * so we can blk_unref() unconditionally */
939 blk_ref(blkdev->blk);
940 }
941 blk_attach_dev_nofail(blkdev->blk, blkdev);
942 blkdev->file_size = blk_getlength(blkdev->blk);
943 if (blkdev->file_size < 0) {
944 BlockDriverState *bs = blk_bs(blkdev->blk);
945 const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
946 xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
947 (int)blkdev->file_size, strerror(-blkdev->file_size),
948 drv_name ?: "-");
949 blkdev->file_size = 0;
950 }
951
952 xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
953 " size %" PRId64 " (%" PRId64 " MB)\n",
954 blkdev->type, blkdev->fileproto, blkdev->filename,
955 blkdev->file_size, blkdev->file_size >> 20);
956
957 /* Fill in the sector size and number of sectors */
958 xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
959 xenstore_write_be_int64(&blkdev->xendev, "sectors",
960 blkdev->file_size / blkdev->file_blk);
961
962 if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
963 return -1;
964 }
965 if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
966 &blkdev->xendev.remote_port) == -1) {
967 return -1;
968 }
969 if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
970 blkdev->feature_persistent = FALSE;
971 } else {
972 blkdev->feature_persistent = !!pers;
973 }
974
975 blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
976 if (blkdev->xendev.protocol) {
977 if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
978 blkdev->protocol = BLKIF_PROTOCOL_X86_32;
979 }
980 if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
981 blkdev->protocol = BLKIF_PROTOCOL_X86_64;
982 }
983 }
984
985 blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
986 blkdev->xendev.dom,
987 blkdev->ring_ref,
988 PROT_READ | PROT_WRITE);
989 if (!blkdev->sring) {
990 return -1;
991 }
992 blkdev->cnt_map++;
993
994 switch (blkdev->protocol) {
995 case BLKIF_PROTOCOL_NATIVE:
996 {
997 blkif_sring_t *sring_native = blkdev->sring;
998 BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
999 break;
1000 }
1001 case BLKIF_PROTOCOL_X86_32:
1002 {
1003 blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
1004
1005 BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
1006 break;
1007 }
1008 case BLKIF_PROTOCOL_X86_64:
1009 {
1010 blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
1011
1012 BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
1013 break;
1014 }
1015 }
1016
1017 if (blkdev->feature_persistent) {
1018 /* Init persistent grants */
1019 blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1020 blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
1021 NULL, NULL,
1022 batch_maps ?
1023 (GDestroyNotify)g_free :
1024 (GDestroyNotify)destroy_grant);
1025 blkdev->persistent_regions = NULL;
1026 blkdev->persistent_gnt_count = 0;
1027 }
1028
1029 xen_be_bind_evtchn(&blkdev->xendev);
1030
1031 xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
1032 "remote port %d, local port %d\n",
1033 blkdev->xendev.protocol, blkdev->ring_ref,
1034 blkdev->xendev.remote_port, blkdev->xendev.local_port);
1035 return 0;
1036 }
1037
1038 static void blk_disconnect(struct XenDevice *xendev)
1039 {
1040 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1041
1042 if (blkdev->blk) {
1043 blk_detach_dev(blkdev->blk, blkdev);
1044 blk_unref(blkdev->blk);
1045 blkdev->blk = NULL;
1046 }
1047 xen_be_unbind_evtchn(&blkdev->xendev);
1048
1049 if (blkdev->sring) {
1050 xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
1051 blkdev->cnt_map--;
1052 blkdev->sring = NULL;
1053 }
1054
1055 /*
1056 * Unmap persistent grants before switching to the closed state
1057 * so the frontend can free them.
1058 *
1059 * In the !batch_maps case g_tree_destroy will take care of unmapping
1060 * the grant, but in the batch_maps case we need to iterate over every
1061 * region in persistent_regions and unmap it.
1062 */
1063 if (blkdev->feature_persistent) {
1064 g_tree_destroy(blkdev->persistent_gnts);
1065 assert(batch_maps || blkdev->persistent_gnt_count == 0);
1066 if (batch_maps) {
1067 blkdev->persistent_gnt_count = 0;
1068 g_slist_foreach(blkdev->persistent_regions,
1069 (GFunc)remove_persistent_region, blkdev);
1070 g_slist_free(blkdev->persistent_regions);
1071 }
1072 blkdev->feature_persistent = false;
1073 }
1074 }
1075
1076 static int blk_free(struct XenDevice *xendev)
1077 {
1078 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1079 struct ioreq *ioreq;
1080
1081 if (blkdev->blk || blkdev->sring) {
1082 blk_disconnect(xendev);
1083 }
1084
1085 while (!QLIST_EMPTY(&blkdev->freelist)) {
1086 ioreq = QLIST_FIRST(&blkdev->freelist);
1087 QLIST_REMOVE(ioreq, list);
1088 qemu_iovec_destroy(&ioreq->v);
1089 g_free(ioreq);
1090 }
1091
1092 g_free(blkdev->params);
1093 g_free(blkdev->mode);
1094 g_free(blkdev->type);
1095 g_free(blkdev->dev);
1096 g_free(blkdev->devtype);
1097 qemu_bh_delete(blkdev->bh);
1098 return 0;
1099 }
1100
1101 static void blk_event(struct XenDevice *xendev)
1102 {
1103 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1104
1105 qemu_bh_schedule(blkdev->bh);
1106 }
1107
1108 struct XenDevOps xen_blkdev_ops = {
1109 .size = sizeof(struct XenBlkDev),
1110 .flags = DEVOPS_FLAG_NEED_GNTDEV,
1111 .alloc = blk_alloc,
1112 .init = blk_init,
1113 .initialise = blk_connect,
1114 .disconnect = blk_disconnect,
1115 .event = blk_event,
1116 .free = blk_free,
1117 };