/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
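/*
 * BLOCK_SIZE is the blkif sector size: request offsets and lengths are
 * expressed in 512-byte sectors.  A single segment always stays within
 * one granted 4 KiB page, e.g. sectors first_sect..last_sect cover
 * (last_sect - first_sect + 1) * 512 bytes; ioreq_parse() below rejects
 * segments that would cross a page boundary.
 */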

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

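/*
 * One ioreq tracks a single guest request from ring slot to response.
 * Requests move between three per-device lists: freelist (unused, see
 * ioreq_start), inflight (submitted to the block layer) and finished
 * (waiting for blk_send_response_all to put a response on the ring and
 * recycle the struct via ioreq_release).
 */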
struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_discard;
    gboolean feature_persistent;
    GTree *persistent_gnts;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockDriverState *bs;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

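/*
 * The persistent grant tree is keyed by grant reference, stored as a
 * pointer via GUINT_TO_POINTER, so the comparator unpacks both keys and
 * orders them numerically.
 */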
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect -
               ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

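/*
 * Map the grants backing this request into our address space.  Grants
 * already in the persistent tree are reused as-is; otherwise new grants
 * are mapped either in one batch (xc_gnttab_map_grant_refs, contiguous
 * pages) or one by one (xc_gnttab_map_grant_ref).  When the
 * persistent-grants feature is negotiated, freshly mapped pages are
 * promoted into the tree until max_grants is reached, so later requests
 * can skip the map/unmap round trip.
 */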
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* The domids and refs arrays will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (regardless of whether it is a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

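/*
 * Completion callback for every bdrv_aio_* issued on behalf of an
 * ioreq.  aio_inflight acts as a reference count: the response is only
 * built once it drops to zero.  A pending presync flush re-enters
 * ioreq_runio_qemu_aio() to submit the actual I/O; a postsync flush is
 * chained here before the request is finished.
 */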
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct);
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

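/*
 * Submit a parsed request to the block layer.  Note the refcount
 * pattern: aio_inflight is incremented once up front and again for each
 * submitted aio; the unconditional qemu_aio_complete(ioreq, 0) at the
 * end drops the initial reference, so the response is sent only after
 * all sub-requests have completed.
 */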
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        bdrv_aio_discard(blkdev->bs,
                         discard_req->sector_number, discard_req->nr_sectors,
                         qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

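/*
 * Build a blkif response for one finished request and place it on the
 * ring, using the layout negotiated with the frontend (native, 32-bit
 * or 64-bit x86).  Returns nonzero when the frontend must be notified
 * via the event channel; also flags more_work if further requests are
 * already queued.
 */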
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native,
                                blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
                           RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

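/*
 * Main request loop, run from the bottom half.  Flushes any finished
 * responses first, then consumes ring entries between req_cons and
 * req_prod.  If the ioreq pool is exhausted (max_requests in flight),
 * the loop stops and reschedules itself via more_work.
 */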
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
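/*
 * For example, with the defaults here (max_requests = 32) and the usual
 * BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, the exact worst case above is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound
 * reserves 2 * 32 * 11 = 704.
 */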

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

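/*
 * First backend initialisation step: read our own xenstore config
 * ("params", "mode", "type", "dev", "device-type", "direct-io-safe"),
 * derive the protocol/filename split from params, and advertise
 * feature-flush-cache, feature-persistent, discard support and the
 * VDISK_* info flags to the frontend.
 */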
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

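/*
 * Called once the frontend is ready: open (or adopt) the qemu block
 * backend, publish sector-size and sectors, read ring-ref and
 * event-channel from the frontend, map the shared ring and bind the
 * event channel.
 */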
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
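    /*
     * Xen virtual block devices use major 202 (xvd*) with 16 minors per
     * disk, so this recovers the disk index from the encoded device
     * number in xenstore.
     */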
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        BlockDriver *drv;

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev, NULL);
        if (!blkdev->bs) {
            return -1;
        }

        drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
        if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
                      drv, &local_err) != 0) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            bdrv_unref(blkdev->bs);
            blkdev->bs = NULL;
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive\n");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs was not created by us, so take a reference and we
         * can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                             &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

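/*
 * Backend callbacks wired into the generic xen_backend state machine:
 * alloc at device creation, init once the backend config is readable,
 * initialise (blk_connect) when the frontend connects, event on each
 * event-channel kick, and disconnect/free on teardown.
 */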
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};