/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

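/*
 * When batch_maps is set (see blk_alloc), all grants of a request are
 * mapped with a single xengnttab_map_grant_refs() call into one
 * contiguous region instead of one xengnttab_map_grant_ref() call per
 * page.  max_requests bounds how many ioreq structs may be in flight
 * per device at once.
 */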
static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

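/* Xen block requests address the disk in fixed 512-byte sectors,
 * independent of the backing file's own block size. */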
#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

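/*
 * A persistently mapped grant: the granted guest page stays mapped in
 * our address space across requests, so the map/unmap cost is paid
 * once per grant rather than once per request.
 */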
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_discard;
    gboolean feature_persistent;
    GTree *persistent_gnts;
    GSList *persistent_regions;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockBackend *blk;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

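/*
 * Three-way comparator for the persistent grant tree: returns a
 * negative, zero or positive value as the first grant ref is less
 * than, equal to or greater than the second.
 */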
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1)
            * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

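/*
 * Undo ioreq_map().  With batch_maps the request's grants were mapped
 * as one contiguous region and are unmapped with a single call;
 * otherwise each page is unmapped individually.  Grants that were
 * promoted to persistent mappings are excluded via num_unmap.
 */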
static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* The domids and refs arrays hold the information necessary to
     * map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding whether it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs(gnt, new_maps, domids,
                                                refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref(gnt, domids[i],
                                                     refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

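/*
 * Completion callback shared by all AIO operations of a request.
 * ioreq->aio_inflight is incremented once up front in
 * ioreq_runio_qemu_aio() and once per submitted AIO operation; the
 * final qemu_aio_complete(ioreq, 0) call there drops the up-front
 * reference, so the response is only sent once every operation has
 * completed.
 */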
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native,
                                blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
                           RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
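/*
 * Worked example: with the default max_requests of 32 and (assuming
 * the classic blkif ABI limit) BLKIF_MAX_SEGMENTS_PER_REQUEST of 11,
 * the exact worst case would be 32 * 11 + 31 * 10 + 1 = 663 grants,
 * while the simplified bound reserves MAX_GRANTS(32, 11) = 704.
 */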

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
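    /* The backend "params" node has the form "<fileproto>:<filename>";
     * a bare filename leaves the protocol unset, letting the block
     * layer probe the image format. */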
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
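    /*
     * Assuming the classic Xen virtual-device numbering: major 202
     * (xvd) with 16 minors per disk, this recovers the drive index
     * from the xenbus device number.
     */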
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk was not created by us, so take a reference here
         * and we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                             &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

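    /* The frontend selects the ring ABI via its "protocol" xenstore
     * node; if none is given, assume the backend-native layout. */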
    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};