/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

/* ------------------------------------------------------------- */

static int batch_maps = 0;

static int max_requests = 32;
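
/*
 * Tunables: batch_maps is flipped on in blk_alloc() whenever we are not
 * running in XEN_EMULATE mode, and makes ioreq_map() grab all grants of
 * a request with a single xc_gnttab_map_grant_refs() call instead of one
 * call per segment.  max_requests caps how many struct ioreq are ever
 * allocated per device (see ioreq_start()).
 */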

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;
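
/*
 * Persistent grants: when the frontend negotiates "feature-persistent",
 * granted pages stay mapped in the backend across requests instead of
 * being mapped and unmapped for every single I/O.  Each such mapping is
 * tracked by one PersistentGrant, kept in blkdev->persistent_gnts (a
 * GTree keyed by grant reference) and torn down via destroy_grant().
 */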

struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};
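
/*
 * Request lifecycle: ioreq_start() hands out a struct ioreq from the
 * freelist (allocating a new one while requests_total < max_requests)
 * and moves it to the inflight list; ioreq_finish() moves it to the
 * finished list once all AIO has completed; ioreq_release() resets it
 * and returns it to the freelist after the response has been sent.
 */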

struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    gboolean feature_discard;

    /* Persistent grants extension */
    gboolean feature_persistent;
    GTree *persistent_gnts;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockBackend *blk;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
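
/*
 * GCompareDataFunc for blkdev->persistent_gnts: grant references are
 * stored as GUINT_TO_POINTER() keys, so comparing them numerically is
 * all the GTree lookups in ioreq_map() need.
 */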

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
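
/*
 * Worked example (with the usual 512-byte file_blk): a request with
 * sector_number 1024 and a single segment covering first_sect 0 to
 * last_sect 7 parses to start = 1024 * 512 = 524288 and one iovec entry
 * of len = 8 * 512 = 4096 bytes, i.e. exactly one guest page.  At this
 * point iov_base only carries the intra-page offset; ioreq_map() later
 * adds the mapped page address on top.
 */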

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
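
/*
 * After a successful ioreq_map() every iov_base in ioreq->v points at
 * the granted guest page (plus the intra-page offset set up by
 * ioreq_parse()), and num_unmap counts only the non-persistent
 * mappings, i.e. exactly the ones ioreq_unmap() has to undo.
 */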

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_WRITE);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
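
/*
 * ioreq->aio_inflight acts as a reference count: ioreq_runio_qemu_aio()
 * takes one reference up front and one per submitted AIO, and drops the
 * initial one through the final qemu_aio_complete(ioreq, 0) call.  The
 * response is therefore only sent once every outstanding AIO (including
 * a possible postsync flush) has completed.
 */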

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
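
/*
 * For instance, with the default max_requests of 32 and the protocol's
 * usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, this reserves
 * 2 * 32 * 11 = 704 grant slots per device.
 */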

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        BlockBackend *blk;
        BlockDriver *drv;
        BlockDriverState *bs;

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blk = blk_new_with_bs(blkdev->dev, NULL);
        if (!blk) {
            return -1;
        }
        blkdev->blk = blk;

        bs = blk_bs(blk);
        drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
        if (bdrv_open(&bs, blkdev->filename, NULL, NULL, qflags,
                      drv, &local_err) != 0) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            blk_unref(blk);
            blkdev->blk = NULL;
            return -1;
        }
        assert(bs == blk_bs(blk));
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk was not created by us, so we take a reference
         * and can blk_unref() it unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blk_bs(blkdev->blk)) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and the number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
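
/*
 * Connect sequence in a nutshell: open (or look up) the BlockBackend,
 * publish sector-size/sectors to xenstore, read ring-ref and
 * event-channel from the frontend, map the shared ring, initialise the
 * back ring for the negotiated protocol variant, and finally bind the
 * event channel so blk_event() starts firing.
 */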

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};