xen: remove 'ioreq' struct/variable/field names from dataplane/xen-block.c
/*
 * Copyright (c) 2018 Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(finished_head, XenBlockRequest) finished;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;
    unsigned int max_requests;
    BlockBackend *blk;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

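/* Reset all per-request state (the allocated iovec is kept) for reuse */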
static void ioreq_reset(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->buf = NULL;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

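/*
 * Take a request from the free list, or allocate a new one while below
 * max_requests. Returns NULL if the backend is already at its limit.
 */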
static XenBlockRequest *ioreq_start(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

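/* Move a completed request from the inflight list to the finished list */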
static void ioreq_finish(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    QLIST_REMOVE(request, list);
    QLIST_INSERT_HEAD(&dataplane->finished, request, list);
    dataplane->requests_inflight--;
    dataplane->requests_finished++;
}

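/*
 * Return a request to the free list; 'finish' selects whether it is being
 * released from the finished list or directly from the inflight list.
 */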
static void ioreq_release(XenBlockRequest *request, bool finish)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    QLIST_REMOVE(request, list);
    ioreq_reset(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
    if (finish) {
        dataplane->requests_finished--;
    } else {
        dataplane->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    };

    if (request->req.operation != BLKIF_OP_READ &&
        blk_is_read_only(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->file_blk;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->file_blk >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->file_blk;
        request->size += len;
    }
    if (request->start + request->size > dataplane->file_size) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

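/*
 * Copy request data between the local bounce buffer and the guest's granted
 * pages: towards the guest for reads, from the guest for writes.
 */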
static int ioreq_grant_copy(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    int64_t file_blk = dataplane->file_blk;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                file_blk;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                file_blk;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) * file_blk;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int ioreq_runio_qemu_aio(XenBlockRequest *request);

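/*
 * Completion callback for all AIO submitted on behalf of a request: once the
 * last outstanding operation finishes, copy read data back to the guest, set
 * the response status, queue the request for response delivery and schedule
 * the bottom half.
 */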
static void qemu_aio_complete(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        ioreq_runio_qemu_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            ioreq_grant_copy(request);
        }
        qemu_vfree(request->buf);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        qemu_vfree(request->buf);
        break;
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_finish(request);

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(dataplane->bh);

done:
    aio_context_release(dataplane->ctx);
}

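/*
 * Issue a discard as one or more blk_aio_pdiscard() calls, each capped at
 * the block layer's per-request limit. Returns false if the sector range
 * wraps around or would overflow the byte limit.
 */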
static bool blk_split_discard(XenBlockRequest *request,
                              blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->file_blk) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS * dataplane->file_blk;
    byte_offset = sec_start * dataplane->file_blk;
    byte_remaining = sec_count * dataplane->file_blk;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

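/*
 * Submit a parsed request to the block layer: allocate a bounce buffer,
 * copy in write data from the guest, then issue the matching asynchronous
 * flush/read/write/discard operations.
 */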
static int ioreq_runio_qemu_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    request->buf = qemu_memalign(XC_PAGE_SIZE, request->size);
    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        ioreq_grant_copy(request)) {
        qemu_vfree(request->buf);
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, qemu_aio_complete, request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       qemu_aio_complete, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        qemu_aio_complete, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!blk_split_discard(request, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(request, 0);

    return 0;

err:
    ioreq_finish(request);
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

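/*
 * Place a single response on the shared ring. Returns whether the frontend
 * needs to be notified, and flags more_work if further requests are pending.
 */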
static int blk_send_response_one(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;
    int send_notify = 0;

    while (!QLIST_EMPTY(&dataplane->finished)) {
        request = QLIST_FIRST(&dataplane->finished);
        send_notify += blk_send_response_one(request);
        ioreq_release(request, true);
    }
    if (send_notify) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

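/*
 * Copy the request at ring index 'rc' out of the shared ring, converting
 * from the 32-bit or 64-bit x86 layout where necessary.
 */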
static int blk_get_request(XenBlockDataPlane *dataplane,
                           XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

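/*
 * Main ring processing: push out finished responses, then consume and submit
 * new requests until the ring is empty or no request structures are free.
 */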
static void blk_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(dataplane);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = ioreq_start(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        blk_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(request) != 0) {

            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
            default:
                break;
            };

            if (blk_send_response_one(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            ioreq_release(request, false);
            continue;
        }

        ioreq_runio_qemu_aio(request);
    }

    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
}

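/* Bottom half: process the ring in the dataplane's AioContext */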
static void blk_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    blk_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

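/* Event channel callback: defer all work to the bottom half */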
static void blk_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    qemu_bh_schedule(dataplane->bh);
}

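/*
 * Allocate a dataplane for a xen-block device, binding it to the given
 * IOThread's AioContext (or the main loop context if none is given).
 */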
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockConf *conf,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->file_blk = conf->logical_block_size;
    dataplane->blk = conf->blk;
    dataplane->file_size = blk_getlength(dataplane->blk);

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->finished);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new(dataplane->ctx, blk_bh, dataplane);

    return dataplane;
}

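/* Free all cached request structures and the dataplane itself */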
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

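/*
 * Tear down the connection to the frontend: move the BlockBackend back to
 * the main loop context, unbind the event channel and unmap the ring.
 */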
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    aio_context_acquire(dataplane->ctx);
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
    aio_context_release(dataplane->ctx);

    xendev = dataplane->xendev;

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

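/*
 * Connect to the frontend: map the shared ring, size the request pool from
 * the ring size, bind the event channel and move the BlockBackend into the
 * dataplane's AioContext. On failure the dataplane is stopped again.
 */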
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    XenDevice *xendev = dataplane->xendev;
    Error *local_err = NULL;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                                 dataplane->ring_ref,
                                                 dataplane->nr_ring_ref,
                                                 PROT_READ | PROT_WRITE,
                                                 &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      blk_event, dataplane,
                                      &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    aio_context_acquire(dataplane->ctx);
    blk_set_aio_context(dataplane->blk, dataplane->ctx);
    aio_context_release(dataplane->ctx);
    return;

stop:
    xen_block_dataplane_stop(dataplane);
}