/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

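/*
 * Persistent grants: when the frontend negotiates "feature-persistent",
 * granted pages are kept mapped in the backend and tracked in the
 * XenBlkDev->persistent_gnts tree, keyed by grant reference.
 */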
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_persistent;
    GTree *persistent_gnts;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockDriverState *bs;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

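/* Comparison and destroy callbacks for the persistent_gnts GTree */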
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

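/*
 * Take an ioreq from the freelist, or allocate a new one while fewer
 * than max_requests are in use; returns NULL when the pool is exhausted
 * so the caller can retry later from the bottom half.
 */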
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

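/*
 * Map the grants referenced by this request: persistently mapped grants
 * are reused directly, the remaining ones are mapped in one batch
 * (xc_gnttab_map_grant_refs) or individually, and newly mapped pages are
 * promoted to persistent grants while max_grants leaves room.
 */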
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* The domids and refs arrays contain the information necessary to
     * map the grants needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq,
     * regardless of whether it is a persistent grant or not.
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

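/*
 * Completion callback for all asynchronous work on an ioreq: a pending
 * presync flush re-submits the request, the last completion performs the
 * postsync flush if needed, then unmaps grants, queues the response and
 * kicks the bottom half.
 */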
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
        break;
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

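/*
 * Queue a single response on the shared ring, using the negotiated ABI;
 * returns nonzero when the frontend needs to be notified via the event
 * channel.
 */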
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

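/*
 * Main request loop, run from the bottom half: retire finished requests,
 * then pull new requests off the ring, parse them and submit them as AIO.
 */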
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 * 2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
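/* e.g. with max_requests 32 and the usual 11 segments per request,
 * this reserves 2 * 32 * 11 = 704 grant slots */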

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

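/*
 * First setup step: read the backend configuration from xenstore and
 * advertise the fixed features (flush cache, persistent grants, read-only
 * and cdrom info).  The image itself is opened later, in blk_connect().
 */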
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

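/*
 * Connect to the frontend: open (or take over) the block device, publish
 * its size, map the shared ring, negotiate the protocol ABI and
 * persistent-grant support, and bind the event channel.
 */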
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev, &local_err);
        if (local_err) {
            blkdev->bs = NULL;
        }
        if (blkdev->bs) {
            BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
                                                            readonly);
            if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
                          drv, &local_err) != 0)
            {
                xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                              error_get_pretty(local_err));
                error_free(local_err);
                bdrv_unref(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs is not created by us, we get a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

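/* Release the block device and unmap the shared ring */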
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

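/*
 * Backend callbacks registered with the generic Xen backend core;
 * DEVOPS_FLAG_NEED_GNTDEV asks it to open a grant-table handle
 * (xendev.gnttabdev) for this device.
 */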
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};