1 /*
2 * Copyright (C) 2009-2010 Nippon Telegraph and Telephone Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * You should have received a copy of the GNU General Public License
9 * along with this program. If not, see <http://www.gnu.org/licenses/>.
10 *
11 * Contributions after 2012-01-13 are licensed under the terms of the
12 * GNU GPL, version 2 or (at your option) any later version.
13 */
14
15 #include "qemu/osdep.h"
16 #include "qapi-visit.h"
17 #include "qapi/error.h"
18 #include "qapi/qmp/qdict.h"
19 #include "qapi/qobject-input-visitor.h"
20 #include "qemu/uri.h"
21 #include "qemu/error-report.h"
22 #include "qemu/option.h"
23 #include "qemu/sockets.h"
24 #include "block/block_int.h"
25 #include "sysemu/block-backend.h"
26 #include "qemu/bitops.h"
27 #include "qemu/cutils.h"
28
29 #define SD_PROTO_VER 0x01
30
31 #define SD_DEFAULT_ADDR "localhost"
32 #define SD_DEFAULT_PORT 7000
33
34 #define SD_OP_CREATE_AND_WRITE_OBJ 0x01
35 #define SD_OP_READ_OBJ 0x02
36 #define SD_OP_WRITE_OBJ 0x03
37 /* 0x04 is used internally by Sheepdog */
38
39 #define SD_OP_NEW_VDI 0x11
40 #define SD_OP_LOCK_VDI 0x12
41 #define SD_OP_RELEASE_VDI 0x13
42 #define SD_OP_GET_VDI_INFO 0x14
43 #define SD_OP_READ_VDIS 0x15
44 #define SD_OP_FLUSH_VDI 0x16
45 #define SD_OP_DEL_VDI 0x17
46 #define SD_OP_GET_CLUSTER_DEFAULT 0x18
47
48 #define SD_FLAG_CMD_WRITE 0x01
49 #define SD_FLAG_CMD_COW 0x02
50 #define SD_FLAG_CMD_CACHE 0x04 /* Writeback mode for cache */
51 #define SD_FLAG_CMD_DIRECT 0x08 /* Don't use cache */
52
53 #define SD_RES_SUCCESS 0x00 /* Success */
54 #define SD_RES_UNKNOWN 0x01 /* Unknown error */
55 #define SD_RES_NO_OBJ 0x02 /* No object found */
56 #define SD_RES_EIO 0x03 /* I/O error */
57 #define SD_RES_VDI_EXIST 0x04 /* Vdi exists already */
58 #define SD_RES_INVALID_PARMS 0x05 /* Invalid parameters */
59 #define SD_RES_SYSTEM_ERROR 0x06 /* System error */
60 #define SD_RES_VDI_LOCKED 0x07 /* Vdi is locked */
61 #define SD_RES_NO_VDI 0x08 /* No vdi found */
62 #define SD_RES_NO_BASE_VDI 0x09 /* No base vdi found */
63 #define SD_RES_VDI_READ 0x0A /* Cannot read requested vdi */
64 #define SD_RES_VDI_WRITE 0x0B /* Cannot write requested vdi */
65 #define SD_RES_BASE_VDI_READ 0x0C /* Cannot read base vdi */
66 #define SD_RES_BASE_VDI_WRITE 0x0D /* Cannot write base vdi */
67 #define SD_RES_NO_TAG 0x0E /* Requested tag is not found */
68 #define SD_RES_STARTUP 0x0F /* Sheepdog is starting up */
69 #define SD_RES_VDI_NOT_LOCKED 0x10 /* Vdi is not locked */
70 #define SD_RES_SHUTDOWN 0x11 /* Sheepdog is shutting down */
71 #define SD_RES_NO_MEM 0x12 /* Cannot allocate memory */
72 #define SD_RES_FULL_VDI 0x13 /* We already have the maximum number of VDIs */
73 #define SD_RES_VER_MISMATCH 0x14 /* Protocol version mismatch */
74 #define SD_RES_NO_SPACE 0x15 /* Server has no room for new objects */
75 #define SD_RES_WAIT_FOR_FORMAT 0x16 /* Waiting for a format operation */
76 #define SD_RES_WAIT_FOR_JOIN 0x17 /* Waiting for other nodes joining */
77 #define SD_RES_JOIN_FAILED 0x18 /* Target node failed to join sheepdog */
78 #define SD_RES_HALT 0x19 /* Sheepdog has stopped serving I/O requests */
79 #define SD_RES_READONLY 0x1A /* Object is read-only */
80
81 /*
82 * Object ID rules
83 *
84 * 0 - 19 (20 bits): data object space
85 * 20 - 31 (12 bits): reserved data object space
86 * 32 - 55 (24 bits): vdi object space
87 * 56 - 59 ( 4 bits): reserved vdi object space
88 * 60 - 63 ( 4 bits): object type identifier space
89 */
90
91 #define VDI_SPACE_SHIFT 32
92 #define VDI_BIT (UINT64_C(1) << 63)
93 #define VMSTATE_BIT (UINT64_C(1) << 62)
94 #define MAX_DATA_OBJS (UINT64_C(1) << 20)
95 #define MAX_CHILDREN 1024
96 #define SD_MAX_VDI_LEN 256
97 #define SD_MAX_VDI_TAG_LEN 256
98 #define SD_NR_VDIS (1U << 24)
99 #define SD_DATA_OBJ_SIZE (UINT64_C(1) << 22)
100 #define SD_MAX_VDI_SIZE (SD_DATA_OBJ_SIZE * MAX_DATA_OBJS)
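/*
 * Worked out from the defines above: SD_DATA_OBJ_SIZE is 2^22 bytes (4 MiB)
 * and MAX_DATA_OBJS is 2^20, so SD_MAX_VDI_SIZE comes to 2^42 bytes,
 * i.e. 4 TiB per VDI with the default object size.
 */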
101 #define SD_DEFAULT_BLOCK_SIZE_SHIFT 22
102 /*
103 * For erasure coding, we use at most SD_EC_MAX_STRIP for data strips and
104 * (SD_EC_MAX_STRIP - 1) for parity strips
105 *
106 * SD_MAX_COPIES is sum of number of data strips and parity strips.
107 */
108 #define SD_EC_MAX_STRIP 16
109 #define SD_MAX_COPIES (SD_EC_MAX_STRIP * 2 - 1)
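/*
 * Worked out: with SD_EC_MAX_STRIP = 16, SD_MAX_COPIES is 16 * 2 - 1 = 31,
 * i.e. up to 16 data strips plus 15 parity strips.
 */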
110
111 #define SD_INODE_SIZE (sizeof(SheepdogInode))
112 #define CURRENT_VDI_ID 0
113
114 #define LOCK_TYPE_NORMAL 0
115 #define LOCK_TYPE_SHARED 1 /* for iSCSI multipath */
116
117 typedef struct SheepdogReq {
118 uint8_t proto_ver;
119 uint8_t opcode;
120 uint16_t flags;
121 uint32_t epoch;
122 uint32_t id;
123 uint32_t data_length;
124 uint32_t opcode_specific[8];
125 } SheepdogReq;
126
127 typedef struct SheepdogRsp {
128 uint8_t proto_ver;
129 uint8_t opcode;
130 uint16_t flags;
131 uint32_t epoch;
132 uint32_t id;
133 uint32_t data_length;
134 uint32_t result;
135 uint32_t opcode_specific[7];
136 } SheepdogRsp;
137
138 typedef struct SheepdogObjReq {
139 uint8_t proto_ver;
140 uint8_t opcode;
141 uint16_t flags;
142 uint32_t epoch;
143 uint32_t id;
144 uint32_t data_length;
145 uint64_t oid;
146 uint64_t cow_oid;
147 uint8_t copies;
148 uint8_t copy_policy;
149 uint8_t reserved[6];
150 uint64_t offset;
151 } SheepdogObjReq;
152
153 typedef struct SheepdogObjRsp {
154 uint8_t proto_ver;
155 uint8_t opcode;
156 uint16_t flags;
157 uint32_t epoch;
158 uint32_t id;
159 uint32_t data_length;
160 uint32_t result;
161 uint8_t copies;
162 uint8_t copy_policy;
163 uint8_t reserved[2];
164 uint32_t pad[6];
165 } SheepdogObjRsp;
166
167 typedef struct SheepdogVdiReq {
168 uint8_t proto_ver;
169 uint8_t opcode;
170 uint16_t flags;
171 uint32_t epoch;
172 uint32_t id;
173 uint32_t data_length;
174 uint64_t vdi_size;
175 uint32_t base_vdi_id;
176 uint8_t copies;
177 uint8_t copy_policy;
178 uint8_t store_policy;
179 uint8_t block_size_shift;
180 uint32_t snapid;
181 uint32_t type;
182 uint32_t pad[2];
183 } SheepdogVdiReq;
184
185 typedef struct SheepdogVdiRsp {
186 uint8_t proto_ver;
187 uint8_t opcode;
188 uint16_t flags;
189 uint32_t epoch;
190 uint32_t id;
191 uint32_t data_length;
192 uint32_t result;
193 uint32_t rsvd;
194 uint32_t vdi_id;
195 uint32_t pad[5];
196 } SheepdogVdiRsp;
197
198 typedef struct SheepdogClusterRsp {
199 uint8_t proto_ver;
200 uint8_t opcode;
201 uint16_t flags;
202 uint32_t epoch;
203 uint32_t id;
204 uint32_t data_length;
205 uint32_t result;
206 uint8_t nr_copies;
207 uint8_t copy_policy;
208 uint8_t block_size_shift;
209 uint8_t __pad1;
210 uint32_t __pad2[6];
211 } SheepdogClusterRsp;
212
213 typedef struct SheepdogInode {
214 char name[SD_MAX_VDI_LEN];
215 char tag[SD_MAX_VDI_TAG_LEN];
216 uint64_t ctime;
217 uint64_t snap_ctime;
218 uint64_t vm_clock_nsec;
219 uint64_t vdi_size;
220 uint64_t vm_state_size;
221 uint16_t copy_policy;
222 uint8_t nr_copies;
223 uint8_t block_size_shift;
224 uint32_t snap_id;
225 uint32_t vdi_id;
226 uint32_t parent_vdi_id;
227 uint32_t child_vdi_id[MAX_CHILDREN];
228 uint32_t data_vdi_id[MAX_DATA_OBJS];
229 } SheepdogInode;
230
231 #define SD_INODE_HEADER_SIZE offsetof(SheepdogInode, data_vdi_id)
232
233 /*
234 * 64 bit FNV-1a non-zero initial basis
235 */
236 #define FNV1A_64_INIT ((uint64_t)0xcbf29ce484222325ULL)
237
238 /*
239 * 64 bit Fowler/Noll/Vo FNV-1a hash code
240 */
241 static inline uint64_t fnv_64a_buf(void *buf, size_t len, uint64_t hval)
242 {
243 unsigned char *bp = buf;
244 unsigned char *be = bp + len;
245 while (bp < be) {
246 hval ^= (uint64_t) *bp++;
247 hval += (hval << 1) + (hval << 4) + (hval << 5) +
248 (hval << 7) + (hval << 8) + (hval << 40);
249 }
250 return hval;
251 }
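/*
 * Illustrative sketch (not exercised in this file; the actual vdi id
 * assignment happens on the server side): a buffer such as a VDI name can
 * be reduced to a value in the 24-bit vdi id space by hashing and masking,
 * e.g.
 *
 *     uint32_t vid = fnv_64a_buf(name, strlen(name), FNV1A_64_INIT)
 *                    & (SD_NR_VDIS - 1);
 */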
252
253 static inline bool is_data_obj_writable(SheepdogInode *inode, unsigned int idx)
254 {
255 return inode->vdi_id == inode->data_vdi_id[idx];
256 }
257
258 static inline bool is_data_obj(uint64_t oid)
259 {
260 return !(VDI_BIT & oid);
261 }
262
263 static inline uint64_t data_oid_to_idx(uint64_t oid)
264 {
265 return oid & (MAX_DATA_OBJS - 1);
266 }
267
268 static inline uint32_t oid_to_vid(uint64_t oid)
269 {
270 return (oid & ~VDI_BIT) >> VDI_SPACE_SHIFT;
271 }
272
273 static inline uint64_t vid_to_vdi_oid(uint32_t vid)
274 {
275 return VDI_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT);
276 }
277
278 static inline uint64_t vid_to_vmstate_oid(uint32_t vid, uint32_t idx)
279 {
280 return VMSTATE_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
281 }
282
283 static inline uint64_t vid_to_data_oid(uint32_t vid, uint32_t idx)
284 {
285 return ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
286 }
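/*
 * Illustrative example of the helpers above (values are arbitrary): for
 * vid 0x123456 and object index 7,
 *
 *     vid_to_data_oid(0x123456, 7)        == 0x0012345600000007
 *     vid_to_vdi_oid(0x123456)            == 0x8012345600000000 (VDI_BIT set)
 *     data_oid_to_idx(0x0012345600000007) == 7
 *     oid_to_vid(0x8012345600000000)      == 0x123456
 */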
287
288 static inline bool is_snapshot(struct SheepdogInode *inode)
289 {
290 return !!inode->snap_ctime;
291 }
292
293 static inline size_t count_data_objs(const struct SheepdogInode *inode)
294 {
295 return DIV_ROUND_UP(inode->vdi_size,
296 (1UL << inode->block_size_shift));
297 }
298
299 #undef DPRINTF
300 #ifdef DEBUG_SDOG
301 #define DEBUG_SDOG_PRINT 1
302 #else
303 #define DEBUG_SDOG_PRINT 0
304 #endif
305 #define DPRINTF(fmt, args...) \
306 do { \
307 if (DEBUG_SDOG_PRINT) { \
308 fprintf(stderr, "%s %d: " fmt, __func__, __LINE__, ##args); \
309 } \
310 } while (0)
311
312 typedef struct SheepdogAIOCB SheepdogAIOCB;
313 typedef struct BDRVSheepdogState BDRVSheepdogState;
314
315 typedef struct AIOReq {
316 SheepdogAIOCB *aiocb;
317 unsigned int iov_offset;
318
319 uint64_t oid;
320 uint64_t base_oid;
321 uint64_t offset;
322 unsigned int data_len;
323 uint8_t flags;
324 uint32_t id;
325 bool create;
326
327 QLIST_ENTRY(AIOReq) aio_siblings;
328 } AIOReq;
329
330 enum AIOCBState {
331 AIOCB_WRITE_UDATA,
332 AIOCB_READ_UDATA,
333 AIOCB_FLUSH_CACHE,
334 AIOCB_DISCARD_OBJ,
335 };
336
337 #define AIOCBOverlapping(x, y) \
338 (!(x->max_affect_data_idx < y->min_affect_data_idx \
339 || y->max_affect_data_idx < x->min_affect_data_idx))
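/*
 * In other words, two AIOCBs overlap iff their [min, max] index ranges
 * intersect; e.g. ranges [3, 5] and [5, 9] overlap, while [3, 5] and
 * [6, 9] do not.
 */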
340
341 struct SheepdogAIOCB {
342 BDRVSheepdogState *s;
343
344 QEMUIOVector *qiov;
345
346 int64_t sector_num;
347 int nb_sectors;
348
349 int ret;
350 enum AIOCBState aiocb_type;
351
352 Coroutine *coroutine;
353 int nr_pending;
354
355 uint32_t min_affect_data_idx;
356 uint32_t max_affect_data_idx;
357
358 /*
359 * The difference between affect_data_idx and dirty_data_idx:
360 * affect_data_idx represents the index range touched by requests of any type.
361 * dirty_data_idx represents the index range updated by COW requests;
362 * it is used when updating the inode object.
363 */
364 uint32_t min_dirty_data_idx;
365 uint32_t max_dirty_data_idx;
366
367 QLIST_ENTRY(SheepdogAIOCB) aiocb_siblings;
368 };
369
370 struct BDRVSheepdogState {
371 BlockDriverState *bs;
372 AioContext *aio_context;
373
374 SheepdogInode inode;
375
376 char name[SD_MAX_VDI_LEN];
377 bool is_snapshot;
378 uint32_t cache_flags;
379 bool discard_supported;
380
381 SocketAddress *addr;
382 int fd;
383
384 CoMutex lock;
385 Coroutine *co_send;
386 Coroutine *co_recv;
387
388 uint32_t aioreq_seq_num;
389
390 /* Every aio request must be linked to either of these queues. */
391 QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
392 QLIST_HEAD(failed_aio_head, AIOReq) failed_aio_head;
393
394 CoMutex queue_lock;
395 CoQueue overlapping_queue;
396 QLIST_HEAD(inflight_aiocb_head, SheepdogAIOCB) inflight_aiocb_head;
397 };
398
399 typedef struct BDRVSheepdogReopenState {
400 int fd;
401 int cache_flags;
402 } BDRVSheepdogReopenState;
403
404 static const char *sd_strerror(int err)
405 {
406 int i;
407
408 static const struct {
409 int err;
410 const char *desc;
411 } errors[] = {
412 {SD_RES_SUCCESS, "Success"},
413 {SD_RES_UNKNOWN, "Unknown error"},
414 {SD_RES_NO_OBJ, "No object found"},
415 {SD_RES_EIO, "I/O error"},
416 {SD_RES_VDI_EXIST, "VDI exists already"},
417 {SD_RES_INVALID_PARMS, "Invalid parameters"},
418 {SD_RES_SYSTEM_ERROR, "System error"},
419 {SD_RES_VDI_LOCKED, "VDI is already locked"},
420 {SD_RES_NO_VDI, "No vdi found"},
421 {SD_RES_NO_BASE_VDI, "No base VDI found"},
422 {SD_RES_VDI_READ, "Failed to read the requested VDI"},
423 {SD_RES_VDI_WRITE, "Failed to write the requested VDI"},
424 {SD_RES_BASE_VDI_READ, "Failed to read the base VDI"},
425 {SD_RES_BASE_VDI_WRITE, "Failed to write the base VDI"},
426 {SD_RES_NO_TAG, "Failed to find the requested tag"},
427 {SD_RES_STARTUP, "The system is still booting"},
428 {SD_RES_VDI_NOT_LOCKED, "VDI isn't locked"},
429 {SD_RES_SHUTDOWN, "The system is shutting down"},
430 {SD_RES_NO_MEM, "Out of memory on the server"},
431 {SD_RES_FULL_VDI, "We already have the maximum number of VDIs"},
432 {SD_RES_VER_MISMATCH, "Protocol version mismatch"},
433 {SD_RES_NO_SPACE, "Server has no space for new objects"},
434 {SD_RES_WAIT_FOR_FORMAT, "Sheepdog is waiting for a format operation"},
435 {SD_RES_WAIT_FOR_JOIN, "Sheepdog is waiting for other nodes joining"},
436 {SD_RES_JOIN_FAILED, "Target node failed to join sheepdog"},
437 {SD_RES_HALT, "Sheepdog has stopped serving I/O requests"},
438 {SD_RES_READONLY, "Object is read-only"},
439 };
440
441 for (i = 0; i < ARRAY_SIZE(errors); ++i) {
442 if (errors[i].err == err) {
443 return errors[i].desc;
444 }
445 }
446
447 return "Invalid error code";
448 }
449
450 /*
451 * Sheepdog I/O handling:
452 *
453 * 1. In sd_co_rw_vector, we send the I/O requests to the server and
454 * link the requests to the inflight_list in the
455 * BDRVSheepdogState. The function yields while waiting for
456 * the responses.
457 *
458 * 2. We receive the responses in aio_read_response, the fd handler of
459 * the sheepdog connection. We switch back to sd_co_readv/sd_co_writev
460 * after all the requests belonging to the AIOCB are finished. If
461 * needed, sd_co_writev will send another request for the vdi object.
462 */
463
464 static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
465 uint64_t oid, unsigned int data_len,
466 uint64_t offset, uint8_t flags, bool create,
467 uint64_t base_oid, unsigned int iov_offset)
468 {
469 AIOReq *aio_req;
470
471 aio_req = g_malloc(sizeof(*aio_req));
472 aio_req->aiocb = acb;
473 aio_req->iov_offset = iov_offset;
474 aio_req->oid = oid;
475 aio_req->base_oid = base_oid;
476 aio_req->offset = offset;
477 aio_req->data_len = data_len;
478 aio_req->flags = flags;
479 aio_req->id = s->aioreq_seq_num++;
480 aio_req->create = create;
481
482 acb->nr_pending++;
483 return aio_req;
484 }
485
486 static void wait_for_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *acb)
487 {
488 SheepdogAIOCB *cb;
489
490 retry:
491 QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) {
492 if (AIOCBOverlapping(acb, cb)) {
493 qemu_co_queue_wait(&s->overlapping_queue, &s->queue_lock);
494 goto retry;
495 }
496 }
497 }
498
499 static void sd_aio_setup(SheepdogAIOCB *acb, BDRVSheepdogState *s,
500 QEMUIOVector *qiov, int64_t sector_num, int nb_sectors,
501 int type)
502 {
503 uint32_t object_size;
504
505 object_size = (UINT32_C(1) << s->inode.block_size_shift);
506
507 acb->s = s;
508
509 acb->qiov = qiov;
510
511 acb->sector_num = sector_num;
512 acb->nb_sectors = nb_sectors;
513
514 acb->coroutine = qemu_coroutine_self();
515 acb->ret = 0;
516 acb->nr_pending = 0;
517
518 acb->min_affect_data_idx = acb->sector_num * BDRV_SECTOR_SIZE / object_size;
519 acb->max_affect_data_idx = (acb->sector_num * BDRV_SECTOR_SIZE +
520 acb->nb_sectors * BDRV_SECTOR_SIZE) / object_size;
521
522 acb->min_dirty_data_idx = UINT32_MAX;
523 acb->max_dirty_data_idx = 0;
524 acb->aiocb_type = type;
525
526 if (type == AIOCB_FLUSH_CACHE) {
527 return;
528 }
529
530 qemu_co_mutex_lock(&s->queue_lock);
531 wait_for_overlapping_aiocb(s, acb);
532 QLIST_INSERT_HEAD(&s->inflight_aiocb_head, acb, aiocb_siblings);
533 qemu_co_mutex_unlock(&s->queue_lock);
534 }
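/*
 * Illustrative example (arbitrary values, assuming the default
 * block_size_shift of 22, i.e. 4 MiB objects): a request starting at
 * sector 12288 (6 MiB) covering 8192 sectors (4 MiB) gets
 * min_affect_data_idx = 6 MiB / 4 MiB = 1 and
 * max_affect_data_idx = 10 MiB / 4 MiB = 2, so it is serialized against
 * any other AIOCB touching data objects 1 or 2.
 */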
535
536 static SocketAddress *sd_socket_address(const char *path,
537 const char *host, const char *port)
538 {
539 SocketAddress *addr = g_new0(SocketAddress, 1);
540
541 if (path) {
542 addr->type = SOCKET_ADDRESS_TYPE_UNIX;
543 addr->u.q_unix.path = g_strdup(path);
544 } else {
545 addr->type = SOCKET_ADDRESS_TYPE_INET;
546 addr->u.inet.host = g_strdup(host ?: SD_DEFAULT_ADDR);
547 addr->u.inet.port = g_strdup(port ?: stringify(SD_DEFAULT_PORT));
548 }
549
550 return addr;
551 }
552
553 static SocketAddress *sd_server_config(QDict *options, Error **errp)
554 {
555 QDict *server = NULL;
556 QObject *crumpled_server = NULL;
557 Visitor *iv = NULL;
558 SocketAddress *saddr = NULL;
559 Error *local_err = NULL;
560
561 qdict_extract_subqdict(options, &server, "server.");
562
563 crumpled_server = qdict_crumple(server, errp);
564 if (!crumpled_server) {
565 goto done;
566 }
567
568 /*
569 * FIXME .numeric, .to, .ipv4 or .ipv6 don't work with -drive
570 * server.type=inet. .to doesn't matter, it's ignored anyway.
571 * That's because when @options come from -blockdev or
572 * blockdev_add, members are typed according to the QAPI schema,
573 * but when they come from -drive, they're all QString. The
574 * visitor expects the former.
575 */
576 iv = qobject_input_visitor_new(crumpled_server);
577 visit_type_SocketAddress(iv, NULL, &saddr, &local_err);
578 if (local_err) {
579 error_propagate(errp, local_err);
580 goto done;
581 }
582
583 done:
584 visit_free(iv);
585 qobject_decref(crumpled_server);
586 QDECREF(server);
587 return saddr;
588 }
589
590 /* Return -EIO in case of error, file descriptor on success */
591 static int connect_to_sdog(BDRVSheepdogState *s, Error **errp)
592 {
593 int fd;
594
595 fd = socket_connect(s->addr, errp);
596
597 if (s->addr->type == SOCKET_ADDRESS_TYPE_INET && fd >= 0) {
598 int ret = socket_set_nodelay(fd);
599 if (ret < 0) {
600 error_report("%s", strerror(errno));
601 }
602 }
603
604 if (fd >= 0) {
605 qemu_set_nonblock(fd);
606 } else {
607 fd = -EIO;
608 }
609
610 return fd;
611 }
612
613 /* Return 0 on success and -errno in case of error */
614 static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
615 unsigned int *wlen)
616 {
617 int ret;
618
619 ret = qemu_co_send(sockfd, hdr, sizeof(*hdr));
620 if (ret != sizeof(*hdr)) {
621 error_report("failed to send a req, %s", strerror(errno));
622 return -errno;
623 }
624
625 ret = qemu_co_send(sockfd, data, *wlen);
626 if (ret != *wlen) {
627 error_report("failed to send a req, %s", strerror(errno));
628 return -errno;
629 }
630
631 return ret;
632 }
633
634 typedef struct SheepdogReqCo {
635 int sockfd;
636 BlockDriverState *bs;
637 AioContext *aio_context;
638 SheepdogReq *hdr;
639 void *data;
640 unsigned int *wlen;
641 unsigned int *rlen;
642 int ret;
643 bool finished;
644 Coroutine *co;
645 } SheepdogReqCo;
646
647 static void restart_co_req(void *opaque)
648 {
649 SheepdogReqCo *srco = opaque;
650
651 aio_co_wake(srco->co);
652 }
653
654 static coroutine_fn void do_co_req(void *opaque)
655 {
656 int ret;
657 SheepdogReqCo *srco = opaque;
658 int sockfd = srco->sockfd;
659 SheepdogReq *hdr = srco->hdr;
660 void *data = srco->data;
661 unsigned int *wlen = srco->wlen;
662 unsigned int *rlen = srco->rlen;
663
664 srco->co = qemu_coroutine_self();
665 aio_set_fd_handler(srco->aio_context, sockfd, false,
666 NULL, restart_co_req, NULL, srco);
667
668 ret = send_co_req(sockfd, hdr, data, wlen);
669 if (ret < 0) {
670 goto out;
671 }
672
673 aio_set_fd_handler(srco->aio_context, sockfd, false,
674 restart_co_req, NULL, NULL, srco);
675
676 ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
677 if (ret != sizeof(*hdr)) {
678 error_report("failed to get a rsp, %s", strerror(errno));
679 ret = -errno;
680 goto out;
681 }
682
683 if (*rlen > hdr->data_length) {
684 *rlen = hdr->data_length;
685 }
686
687 if (*rlen) {
688 ret = qemu_co_recv(sockfd, data, *rlen);
689 if (ret != *rlen) {
690 error_report("failed to get the data, %s", strerror(errno));
691 ret = -errno;
692 goto out;
693 }
694 }
695 ret = 0;
696 out:
697 /* there is at most one request for this sockfd, so it is safe to
698 * set each handler to NULL. */
699 aio_set_fd_handler(srco->aio_context, sockfd, false,
700 NULL, NULL, NULL, NULL);
701
702 srco->co = NULL;
703 srco->ret = ret;
704 /* Set srco->finished before reading bs->wakeup. */
705 atomic_mb_set(&srco->finished, true);
706 if (srco->bs) {
707 bdrv_wakeup(srco->bs);
708 }
709 }
710
711 /*
712 * Send the request to the sheep in a synchronous manner.
713 *
714 * Return 0 on success, -errno in case of error.
715 */
716 static int do_req(int sockfd, BlockDriverState *bs, SheepdogReq *hdr,
717 void *data, unsigned int *wlen, unsigned int *rlen)
718 {
719 Coroutine *co;
720 SheepdogReqCo srco = {
721 .sockfd = sockfd,
722 .aio_context = bs ? bdrv_get_aio_context(bs) : qemu_get_aio_context(),
723 .bs = bs,
724 .hdr = hdr,
725 .data = data,
726 .wlen = wlen,
727 .rlen = rlen,
728 .ret = 0,
729 .finished = false,
730 };
731
732 if (qemu_in_coroutine()) {
733 do_co_req(&srco);
734 } else {
735 co = qemu_coroutine_create(do_co_req, &srco);
736 if (bs) {
737 bdrv_coroutine_enter(bs, co);
738 BDRV_POLL_WHILE(bs, !srco.finished);
739 } else {
740 qemu_coroutine_enter(co);
741 while (!srco.finished) {
742 aio_poll(qemu_get_aio_context(), true);
743 }
744 }
745 }
746
747 return srco.ret;
748 }
749
750 static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
751 struct iovec *iov, int niov,
752 enum AIOCBState aiocb_type);
753 static void coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req);
754 static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag);
755 static int get_sheep_fd(BDRVSheepdogState *s, Error **errp);
756 static void co_write_request(void *opaque);
757
758 static coroutine_fn void reconnect_to_sdog(void *opaque)
759 {
760 BDRVSheepdogState *s = opaque;
761 AIOReq *aio_req, *next;
762
763 aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
764 NULL, NULL, NULL);
765 close(s->fd);
766 s->fd = -1;
767
768 /* Wait for outstanding write requests to be completed. */
769 while (s->co_send != NULL) {
770 co_write_request(opaque);
771 }
772
773 /* Try to reconnect to the sheepdog server once per second. */
774 while (s->fd < 0) {
775 Error *local_err = NULL;
776 s->fd = get_sheep_fd(s, &local_err);
777 if (s->fd < 0) {
778 DPRINTF("Wait for connection to be established\n");
779 error_report_err(local_err);
780 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000000ULL);
781 }
782 };
783
784 /*
785 * Now we have to resend all the requests in the inflight queue. However,
786 * resend_aioreq() can yield and newly created requests can be added to the
787 * inflight queue before the coroutine is resumed. To avoid mixing them, we
788 * have to move all the inflight requests to the failed queue before
789 * resend_aioreq() is called.
790 */
791 qemu_co_mutex_lock(&s->queue_lock);
792 QLIST_FOREACH_SAFE(aio_req, &s->inflight_aio_head, aio_siblings, next) {
793 QLIST_REMOVE(aio_req, aio_siblings);
794 QLIST_INSERT_HEAD(&s->failed_aio_head, aio_req, aio_siblings);
795 }
796
797 /* Resend all the failed aio requests. */
798 while (!QLIST_EMPTY(&s->failed_aio_head)) {
799 aio_req = QLIST_FIRST(&s->failed_aio_head);
800 QLIST_REMOVE(aio_req, aio_siblings);
801 qemu_co_mutex_unlock(&s->queue_lock);
802 resend_aioreq(s, aio_req);
803 qemu_co_mutex_lock(&s->queue_lock);
804 }
805 qemu_co_mutex_unlock(&s->queue_lock);
806 }
807
808 /*
809 * Receive responses of the I/O requests.
810 *
811 * This function is registered as a fd handler, and called from the
812 * main loop when s->fd is ready for reading responses.
813 */
814 static void coroutine_fn aio_read_response(void *opaque)
815 {
816 SheepdogObjRsp rsp;
817 BDRVSheepdogState *s = opaque;
818 int fd = s->fd;
819 int ret;
820 AIOReq *aio_req = NULL;
821 SheepdogAIOCB *acb;
822 uint64_t idx;
823
824 /* read a header */
825 ret = qemu_co_recv(fd, &rsp, sizeof(rsp));
826 if (ret != sizeof(rsp)) {
827 error_report("failed to get the header, %s", strerror(errno));
828 goto err;
829 }
830
831 /* find the right aio_req from the inflight aio list */
832 QLIST_FOREACH(aio_req, &s->inflight_aio_head, aio_siblings) {
833 if (aio_req->id == rsp.id) {
834 break;
835 }
836 }
837 if (!aio_req) {
838 error_report("cannot find aio_req %x", rsp.id);
839 goto err;
840 }
841
842 acb = aio_req->aiocb;
843
844 switch (acb->aiocb_type) {
845 case AIOCB_WRITE_UDATA:
846 if (!is_data_obj(aio_req->oid)) {
847 break;
848 }
849 idx = data_oid_to_idx(aio_req->oid);
850
851 if (aio_req->create) {
852 /*
853 * If the object is a newly created one, we need to update
854 * the vdi object (metadata object). min_dirty_data_idx
855 * and max_dirty_data_idx are widened to include the
856 * updated index.
857 */
858 if (rsp.result == SD_RES_SUCCESS) {
859 s->inode.data_vdi_id[idx] = s->inode.vdi_id;
860 acb->max_dirty_data_idx = MAX(idx, acb->max_dirty_data_idx);
861 acb->min_dirty_data_idx = MIN(idx, acb->min_dirty_data_idx);
862 }
863 }
864 break;
865 case AIOCB_READ_UDATA:
866 ret = qemu_co_recvv(fd, acb->qiov->iov, acb->qiov->niov,
867 aio_req->iov_offset, rsp.data_length);
868 if (ret != rsp.data_length) {
869 error_report("failed to get the data, %s", strerror(errno));
870 goto err;
871 }
872 break;
873 case AIOCB_FLUSH_CACHE:
874 if (rsp.result == SD_RES_INVALID_PARMS) {
875 DPRINTF("disable cache since the server doesn't support it\n");
876 s->cache_flags = SD_FLAG_CMD_DIRECT;
877 rsp.result = SD_RES_SUCCESS;
878 }
879 break;
880 case AIOCB_DISCARD_OBJ:
881 switch (rsp.result) {
882 case SD_RES_INVALID_PARMS:
883 error_report("server doesn't support discard command");
884 rsp.result = SD_RES_SUCCESS;
885 s->discard_supported = false;
886 break;
887 default:
888 break;
889 }
890 }
891
892 /* No more data for this aio_req (reload_inode below uses its own file
893 * descriptor handler which doesn't use co_recv).
894 */
895 s->co_recv = NULL;
896
897 qemu_co_mutex_lock(&s->queue_lock);
898 QLIST_REMOVE(aio_req, aio_siblings);
899 qemu_co_mutex_unlock(&s->queue_lock);
900
901 switch (rsp.result) {
902 case SD_RES_SUCCESS:
903 break;
904 case SD_RES_READONLY:
905 if (s->inode.vdi_id == oid_to_vid(aio_req->oid)) {
906 ret = reload_inode(s, 0, "");
907 if (ret < 0) {
908 goto err;
909 }
910 }
911 if (is_data_obj(aio_req->oid)) {
912 aio_req->oid = vid_to_data_oid(s->inode.vdi_id,
913 data_oid_to_idx(aio_req->oid));
914 } else {
915 aio_req->oid = vid_to_vdi_oid(s->inode.vdi_id);
916 }
917 resend_aioreq(s, aio_req);
918 return;
919 default:
920 acb->ret = -EIO;
921 error_report("%s", sd_strerror(rsp.result));
922 break;
923 }
924
925 g_free(aio_req);
926
927 if (!--acb->nr_pending) {
928 /*
929 * We've finished all requests which belong to the AIOCB, so
930 * we can switch back to sd_co_readv/writev now.
931 */
932 aio_co_wake(acb->coroutine);
933 }
934
935 return;
936
937 err:
938 reconnect_to_sdog(opaque);
939 }
940
941 static void co_read_response(void *opaque)
942 {
943 BDRVSheepdogState *s = opaque;
944
945 if (!s->co_recv) {
946 s->co_recv = qemu_coroutine_create(aio_read_response, opaque);
947 }
948
949 aio_co_enter(s->aio_context, s->co_recv);
950 }
951
952 static void co_write_request(void *opaque)
953 {
954 BDRVSheepdogState *s = opaque;
955
956 aio_co_wake(s->co_send);
957 }
958
959 /*
960 * Return a socket descriptor to read/write objects.
961 *
962 * We cannot use this descriptor for other operations because
963 * the block driver may be waiting for a response from the server.
964 */
965 static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
966 {
967 int fd;
968
969 fd = connect_to_sdog(s, errp);
970 if (fd < 0) {
971 return fd;
972 }
973
974 aio_set_fd_handler(s->aio_context, fd, false,
975 co_read_response, NULL, NULL, s);
976 return fd;
977 }
978
979 /*
980 * Parse numeric snapshot ID in @str
981 * If @str can't be parsed as number, return false.
982 * Else, if the number is zero or too large, set *@snapid to zero and
983 * return true.
984 * Else, set *@snapid to the number and return true.
985 */
986 static bool sd_parse_snapid(const char *str, uint32_t *snapid)
987 {
988 unsigned long ul;
989 int ret;
990
991 ret = qemu_strtoul(str, NULL, 10, &ul);
992 if (ret == -ERANGE) {
993 ul = ret = 0;
994 }
995 if (ret) {
996 return false;
997 }
998 if (ul > UINT32_MAX) {
999 ul = 0;
1000 }
1001
1002 *snapid = ul;
1003 return true;
1004 }
1005
1006 static bool sd_parse_snapid_or_tag(const char *str,
1007 uint32_t *snapid, char tag[])
1008 {
1009 if (!sd_parse_snapid(str, snapid)) {
1010 *snapid = 0;
1011 if (g_strlcpy(tag, str, SD_MAX_VDI_TAG_LEN) >= SD_MAX_VDI_TAG_LEN) {
1012 return false;
1013 }
1014 } else if (!*snapid) {
1015 return false;
1016 } else {
1017 tag[0] = 0;
1018 }
1019 return true;
1020 }
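/*
 * Illustrative results (not exhaustive): "42" yields snapid 42 and an
 * empty tag; "v1" yields snapid 0 and tag "v1"; "0" is rejected because
 * a numeric snapshot ID of zero is not valid here.
 */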
1021
1022 typedef struct {
1023 const char *path; /* non-null iff transport is unix */
1024 const char *host; /* valid when transport is tcp */
1025 int port; /* valid when transport is tcp */
1026 char vdi[SD_MAX_VDI_LEN];
1027 char tag[SD_MAX_VDI_TAG_LEN];
1028 uint32_t snap_id;
1029 /* Remainder is only for sd_config_done() */
1030 URI *uri;
1031 QueryParams *qp;
1032 } SheepdogConfig;
1033
1034 static void sd_config_done(SheepdogConfig *cfg)
1035 {
1036 if (cfg->qp) {
1037 query_params_free(cfg->qp);
1038 }
1039 uri_free(cfg->uri);
1040 }
1041
1042 static void sd_parse_uri(SheepdogConfig *cfg, const char *filename,
1043 Error **errp)
1044 {
1045 Error *err = NULL;
1046 QueryParams *qp = NULL;
1047 bool is_unix;
1048 URI *uri;
1049
1050 memset(cfg, 0, sizeof(*cfg));
1051
1052 cfg->uri = uri = uri_parse(filename);
1053 if (!uri) {
1054 error_setg(&err, "invalid URI");
1055 goto out;
1056 }
1057
1058 /* transport */
1059 if (!g_strcmp0(uri->scheme, "sheepdog")) {
1060 is_unix = false;
1061 } else if (!g_strcmp0(uri->scheme, "sheepdog+tcp")) {
1062 is_unix = false;
1063 } else if (!g_strcmp0(uri->scheme, "sheepdog+unix")) {
1064 is_unix = true;
1065 } else {
1066 error_setg(&err, "URI scheme must be 'sheepdog', 'sheepdog+tcp',"
1067 " or 'sheepdog+unix'");
1068 goto out;
1069 }
1070
1071 if (uri->path == NULL || !strcmp(uri->path, "/")) {
1072 error_setg(&err, "missing file path in URI");
1073 goto out;
1074 }
1075 if (g_strlcpy(cfg->vdi, uri->path + 1, SD_MAX_VDI_LEN)
1076 >= SD_MAX_VDI_LEN) {
1077 error_setg(&err, "VDI name is too long");
1078 goto out;
1079 }
1080
1081 cfg->qp = qp = query_params_parse(uri->query);
1082
1083 if (is_unix) {
1084 /* sheepdog+unix:///vdiname?socket=path */
1085 if (uri->server || uri->port) {
1086 error_setg(&err, "URI scheme %s doesn't accept a server address",
1087 uri->scheme);
1088 goto out;
1089 }
1090 if (!qp->n) {
1091 error_setg(&err,
1092 "URI scheme %s requires query parameter 'socket'",
1093 uri->scheme);
1094 goto out;
1095 }
1096 if (qp->n != 1 || strcmp(qp->p[0].name, "socket")) {
1097 error_setg(&err, "unexpected query parameters");
1098 goto out;
1099 }
1100 cfg->path = qp->p[0].value;
1101 } else {
1102 /* sheepdog[+tcp]://[host:port]/vdiname */
1103 if (qp->n) {
1104 error_setg(&err, "unexpected query parameters");
1105 goto out;
1106 }
1107 cfg->host = uri->server;
1108 cfg->port = uri->port;
1109 }
1110
1111 /* snapshot tag */
1112 if (uri->fragment) {
1113 if (!sd_parse_snapid_or_tag(uri->fragment,
1114 &cfg->snap_id, cfg->tag)) {
1115 error_setg(&err, "'%s' is not a valid snapshot ID",
1116 uri->fragment);
1117 goto out;
1118 }
1119 } else {
1120 cfg->snap_id = CURRENT_VDI_ID; /* search current vdi */
1121 }
1122
1123 out:
1124 if (err) {
1125 error_propagate(errp, err);
1126 sd_config_done(cfg);
1127 }
1128 }
1129
1130 /*
1131 * Parse a filename (old syntax)
1132 *
1133 * filename must be one of the following formats:
1134 * 1. [vdiname]
1135 * 2. [vdiname]:[snapid]
1136 * 3. [vdiname]:[tag]
1137 * 4. [hostname]:[port]:[vdiname]
1138 * 5. [hostname]:[port]:[vdiname]:[snapid]
1139 * 6. [hostname]:[port]:[vdiname]:[tag]
1140 *
1141 * You can boot from the snapshot images by specifying `snapid' or
1142 * `tag'.
1143 *
1144 * You can run VMs outside the Sheepdog cluster by specifying
1145 * `hostname' and `port' (experimental).
1146 */
1147 static void parse_vdiname(SheepdogConfig *cfg, const char *filename,
1148 Error **errp)
1149 {
1150 Error *err = NULL;
1151 char *p, *q, *uri;
1152 const char *host_spec, *vdi_spec;
1153 int nr_sep;
1154
1155 strstart(filename, "sheepdog:", &filename);
1156 p = q = g_strdup(filename);
1157
1158 /* count the number of separators */
1159 nr_sep = 0;
1160 while (*p) {
1161 if (*p == ':') {
1162 nr_sep++;
1163 }
1164 p++;
1165 }
1166 p = q;
1167
1168 /* use the first two tokens as host_spec. */
1169 if (nr_sep >= 2) {
1170 host_spec = p;
1171 p = strchr(p, ':');
1172 p++;
1173 p = strchr(p, ':');
1174 *p++ = '\0';
1175 } else {
1176 host_spec = "";
1177 }
1178
1179 vdi_spec = p;
1180
1181 p = strchr(vdi_spec, ':');
1182 if (p) {
1183 *p++ = '#';
1184 }
1185
1186 uri = g_strdup_printf("sheepdog://%s/%s", host_spec, vdi_spec);
1187
1188 /*
1189 * FIXME We need to escape URI meta-characters, e.g. "x?y=z"
1190 * produces "sheepdog://x?y=z". Because of that ...
1191 */
1192 sd_parse_uri(cfg, uri, &err);
1193 if (err) {
1194 /*
1195 * ... this can fail, but the error message is misleading.
1196 * Replace it by the traditional useless one until the
1197 * escaping is fixed.
1198 */
1199 error_free(err);
1200 error_setg(errp, "Can't parse filename");
1201 }
1202
1203 g_free(q);
1204 g_free(uri);
1205 }
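/*
 * Illustrative mappings produced by parse_vdiname() (host and VDI names
 * are made up):
 *
 *     "sheepdog:myvdi"                  -> "sheepdog:///myvdi"
 *     "sheepdog:192.0.2.1:7000:myvdi:1" -> "sheepdog://192.0.2.1:7000/myvdi#1"
 *
 * i.e. the last ':' separating the snapshot ID or tag is rewritten to the
 * URI fragment separator '#' before handing off to sd_parse_uri().
 */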
1206
1207 static void sd_parse_filename(const char *filename, QDict *options,
1208 Error **errp)
1209 {
1210 Error *err = NULL;
1211 SheepdogConfig cfg;
1212 char buf[32];
1213
1214 if (strstr(filename, "://")) {
1215 sd_parse_uri(&cfg, filename, &err);
1216 } else {
1217 parse_vdiname(&cfg, filename, &err);
1218 }
1219 if (err) {
1220 error_propagate(errp, err);
1221 return;
1222 }
1223
1224 if (cfg.path) {
1225 qdict_set_default_str(options, "server.path", cfg.path);
1226 qdict_set_default_str(options, "server.type", "unix");
1227 } else {
1228 qdict_set_default_str(options, "server.type", "inet");
1229 qdict_set_default_str(options, "server.host",
1230 cfg.host ?: SD_DEFAULT_ADDR);
1231 snprintf(buf, sizeof(buf), "%d", cfg.port ?: SD_DEFAULT_PORT);
1232 qdict_set_default_str(options, "server.port", buf);
1233 }
1234 qdict_set_default_str(options, "vdi", cfg.vdi);
1235 qdict_set_default_str(options, "tag", cfg.tag);
1236 if (cfg.snap_id) {
1237 snprintf(buf, sizeof(buf), "%d", cfg.snap_id);
1238 qdict_set_default_str(options, "snap-id", buf);
1239 }
1240
1241 sd_config_done(&cfg);
1242 }
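/*
 * Illustrative result (made-up address): parsing
 * "sheepdog://192.0.2.1:7000/myvdi#1" fills @options with the defaults
 * server.type=inet, server.host=192.0.2.1, server.port=7000, vdi=myvdi,
 * tag="" and snap-id=1; explicitly given options always win because only
 * qdict_set_default_str() is used here.
 */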
1243
1244 static int find_vdi_name(BDRVSheepdogState *s, const char *filename,
1245 uint32_t snapid, const char *tag, uint32_t *vid,
1246 bool lock, Error **errp)
1247 {
1248 int ret, fd;
1249 SheepdogVdiReq hdr;
1250 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
1251 unsigned int wlen, rlen = 0;
1252 char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN];
1253
1254 fd = connect_to_sdog(s, errp);
1255 if (fd < 0) {
1256 return fd;
1257 }
1258
1259 /* This pair of strncpy calls ensures that the buffer is zero-filled,
1260 * which is desirable since we'll soon be sending those bytes, and
1261 * don't want the send_req to read uninitialized data.
1262 */
1263 strncpy(buf, filename, SD_MAX_VDI_LEN);
1264 strncpy(buf + SD_MAX_VDI_LEN, tag, SD_MAX_VDI_TAG_LEN);
1265
1266 memset(&hdr, 0, sizeof(hdr));
1267 if (lock) {
1268 hdr.opcode = SD_OP_LOCK_VDI;
1269 hdr.type = LOCK_TYPE_NORMAL;
1270 } else {
1271 hdr.opcode = SD_OP_GET_VDI_INFO;
1272 }
1273 wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN;
1274 hdr.proto_ver = SD_PROTO_VER;
1275 hdr.data_length = wlen;
1276 hdr.snapid = snapid;
1277 hdr.flags = SD_FLAG_CMD_WRITE;
1278
1279 ret = do_req(fd, s->bs, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
1280 if (ret) {
1281 error_setg_errno(errp, -ret, "cannot get vdi info");
1282 goto out;
1283 }
1284
1285 if (rsp->result != SD_RES_SUCCESS) {
1286 error_setg(errp, "cannot get vdi info, %s, %s %" PRIu32 " %s",
1287 sd_strerror(rsp->result), filename, snapid, tag);
1288 if (rsp->result == SD_RES_NO_VDI) {
1289 ret = -ENOENT;
1290 } else if (rsp->result == SD_RES_VDI_LOCKED) {
1291 ret = -EBUSY;
1292 } else {
1293 ret = -EIO;
1294 }
1295 goto out;
1296 }
1297 *vid = rsp->vdi_id;
1298
1299 ret = 0;
1300 out:
1301 closesocket(fd);
1302 return ret;
1303 }
1304
1305 static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
1306 struct iovec *iov, int niov,
1307 enum AIOCBState aiocb_type)
1308 {
1309 int nr_copies = s->inode.nr_copies;
1310 SheepdogObjReq hdr;
1311 unsigned int wlen = 0;
1312 int ret;
1313 uint64_t oid = aio_req->oid;
1314 unsigned int datalen = aio_req->data_len;
1315 uint64_t offset = aio_req->offset;
1316 uint8_t flags = aio_req->flags;
1317 uint64_t old_oid = aio_req->base_oid;
1318 bool create = aio_req->create;
1319
1320 qemu_co_mutex_lock(&s->queue_lock);
1321 QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
1322 qemu_co_mutex_unlock(&s->queue_lock);
1323
1324 if (!nr_copies) {
1325 error_report("bug");
1326 }
1327
1328 memset(&hdr, 0, sizeof(hdr));
1329
1330 switch (aiocb_type) {
1331 case AIOCB_FLUSH_CACHE:
1332 hdr.opcode = SD_OP_FLUSH_VDI;
1333 break;
1334 case AIOCB_READ_UDATA:
1335 hdr.opcode = SD_OP_READ_OBJ;
1336 hdr.flags = flags;
1337 break;
1338 case AIOCB_WRITE_UDATA:
1339 if (create) {
1340 hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
1341 } else {
1342 hdr.opcode = SD_OP_WRITE_OBJ;
1343 }
1344 wlen = datalen;
1345 hdr.flags = SD_FLAG_CMD_WRITE | flags;
1346 break;
1347 case AIOCB_DISCARD_OBJ:
1348 hdr.opcode = SD_OP_WRITE_OBJ;
1349 hdr.flags = SD_FLAG_CMD_WRITE | flags;
1350 s->inode.data_vdi_id[data_oid_to_idx(oid)] = 0;
1351 offset = offsetof(SheepdogInode,
1352 data_vdi_id[data_oid_to_idx(oid)]);
1353 oid = vid_to_vdi_oid(s->inode.vdi_id);
1354 wlen = datalen = sizeof(uint32_t);
1355 break;
1356 }
1357
1358 if (s->cache_flags) {
1359 hdr.flags |= s->cache_flags;
1360 }
1361
1362 hdr.oid = oid;
1363 hdr.cow_oid = old_oid;
1364 hdr.copies = s->inode.nr_copies;
1365
1366 hdr.data_length = datalen;
1367 hdr.offset = offset;
1368
1369 hdr.id = aio_req->id;
1370
1371 qemu_co_mutex_lock(&s->lock);
1372 s->co_send = qemu_coroutine_self();
1373 aio_set_fd_handler(s->aio_context, s->fd, false,
1374 co_read_response, co_write_request, NULL, s);
1375 socket_set_cork(s->fd, 1);
1376
1377 /* send a header */
1378 ret = qemu_co_send(s->fd, &hdr, sizeof(hdr));
1379 if (ret != sizeof(hdr)) {
1380 error_report("failed to send a req, %s", strerror(errno));
1381 goto out;
1382 }
1383
1384 if (wlen) {
1385 ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
1386 if (ret != wlen) {
1387 error_report("failed to send a data, %s", strerror(errno));
1388 }
1389 }
1390 out:
1391 socket_set_cork(s->fd, 0);
1392 aio_set_fd_handler(s->aio_context, s->fd, false,
1393 co_read_response, NULL, NULL, s);
1394 s->co_send = NULL;
1395 qemu_co_mutex_unlock(&s->lock);
1396 }
1397
1398 static int read_write_object(int fd, BlockDriverState *bs, char *buf,
1399 uint64_t oid, uint8_t copies,
1400 unsigned int datalen, uint64_t offset,
1401 bool write, bool create, uint32_t cache_flags)
1402 {
1403 SheepdogObjReq hdr;
1404 SheepdogObjRsp *rsp = (SheepdogObjRsp *)&hdr;
1405 unsigned int wlen, rlen;
1406 int ret;
1407
1408 memset(&hdr, 0, sizeof(hdr));
1409
1410 if (write) {
1411 wlen = datalen;
1412 rlen = 0;
1413 hdr.flags = SD_FLAG_CMD_WRITE;
1414 if (create) {
1415 hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
1416 } else {
1417 hdr.opcode = SD_OP_WRITE_OBJ;
1418 }
1419 } else {
1420 wlen = 0;
1421 rlen = datalen;
1422 hdr.opcode = SD_OP_READ_OBJ;
1423 }
1424
1425 hdr.flags |= cache_flags;
1426
1427 hdr.oid = oid;
1428 hdr.data_length = datalen;
1429 hdr.offset = offset;
1430 hdr.copies = copies;
1431
1432 ret = do_req(fd, bs, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
1433 if (ret) {
1434 error_report("failed to send a request to the sheep");
1435 return ret;
1436 }
1437
1438 switch (rsp->result) {
1439 case SD_RES_SUCCESS:
1440 return 0;
1441 default:
1442 error_report("%s", sd_strerror(rsp->result));
1443 return -EIO;
1444 }
1445 }
1446
1447 static int read_object(int fd, BlockDriverState *bs, char *buf,
1448 uint64_t oid, uint8_t copies,
1449 unsigned int datalen, uint64_t offset,
1450 uint32_t cache_flags)
1451 {
1452 return read_write_object(fd, bs, buf, oid, copies,
1453 datalen, offset, false,
1454 false, cache_flags);
1455 }
1456
1457 static int write_object(int fd, BlockDriverState *bs, char *buf,
1458 uint64_t oid, uint8_t copies,
1459 unsigned int datalen, uint64_t offset, bool create,
1460 uint32_t cache_flags)
1461 {
1462 return read_write_object(fd, bs, buf, oid, copies,
1463 datalen, offset, true,
1464 create, cache_flags);
1465 }
1466
1467 /* update inode with the latest state */
1468 static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag)
1469 {
1470 Error *local_err = NULL;
1471 SheepdogInode *inode;
1472 int ret = 0, fd;
1473 uint32_t vid = 0;
1474
1475 fd = connect_to_sdog(s, &local_err);
1476 if (fd < 0) {
1477 error_report_err(local_err);
1478 return -EIO;
1479 }
1480
1481 inode = g_malloc(SD_INODE_HEADER_SIZE);
1482
1483 ret = find_vdi_name(s, s->name, snapid, tag, &vid, false, &local_err);
1484 if (ret) {
1485 error_report_err(local_err);
1486 goto out;
1487 }
1488
1489 ret = read_object(fd, s->bs, (char *)inode, vid_to_vdi_oid(vid),
1490 s->inode.nr_copies, SD_INODE_HEADER_SIZE, 0,
1491 s->cache_flags);
1492 if (ret < 0) {
1493 goto out;
1494 }
1495
1496 if (inode->vdi_id != s->inode.vdi_id) {
1497 memcpy(&s->inode, inode, SD_INODE_HEADER_SIZE);
1498 }
1499
1500 out:
1501 g_free(inode);
1502 closesocket(fd);
1503
1504 return ret;
1505 }
1506
1507 static void coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req)
1508 {
1509 SheepdogAIOCB *acb = aio_req->aiocb;
1510
1511 aio_req->create = false;
1512
1513 /* check whether this request becomes a CoW one */
1514 if (acb->aiocb_type == AIOCB_WRITE_UDATA && is_data_obj(aio_req->oid)) {
1515 int idx = data_oid_to_idx(aio_req->oid);
1516
1517 if (is_data_obj_writable(&s->inode, idx)) {
1518 goto out;
1519 }
1520
1521 if (s->inode.data_vdi_id[idx]) {
1522 aio_req->base_oid = vid_to_data_oid(s->inode.data_vdi_id[idx], idx);
1523 aio_req->flags |= SD_FLAG_CMD_COW;
1524 }
1525 aio_req->create = true;
1526 }
1527 out:
1528 if (is_data_obj(aio_req->oid)) {
1529 add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
1530 acb->aiocb_type);
1531 } else {
1532 struct iovec iov;
1533 iov.iov_base = &s->inode;
1534 iov.iov_len = sizeof(s->inode);
1535 add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
1536 }
1537 }
1538
1539 static void sd_detach_aio_context(BlockDriverState *bs)
1540 {
1541 BDRVSheepdogState *s = bs->opaque;
1542
1543 aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
1544 NULL, NULL, NULL);
1545 }
1546
1547 static void sd_attach_aio_context(BlockDriverState *bs,
1548 AioContext *new_context)
1549 {
1550 BDRVSheepdogState *s = bs->opaque;
1551
1552 s->aio_context = new_context;
1553 aio_set_fd_handler(new_context, s->fd, false,
1554 co_read_response, NULL, NULL, s);
1555 }
1556
1557 static QemuOptsList runtime_opts = {
1558 .name = "sheepdog",
1559 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
1560 .desc = {
1561 {
1562 .name = "vdi",
1563 .type = QEMU_OPT_STRING,
1564 },
1565 {
1566 .name = "snap-id",
1567 .type = QEMU_OPT_NUMBER,
1568 },
1569 {
1570 .name = "tag",
1571 .type = QEMU_OPT_STRING,
1572 },
1573 { /* end of list */ }
1574 },
1575 };
1576
1577 static int sd_open(BlockDriverState *bs, QDict *options, int flags,
1578 Error **errp)
1579 {
1580 int ret, fd;
1581 uint32_t vid = 0;
1582 BDRVSheepdogState *s = bs->opaque;
1583 const char *vdi, *snap_id_str, *tag;
1584 uint64_t snap_id;
1585 char *buf = NULL;
1586 QemuOpts *opts;
1587 Error *local_err = NULL;
1588
1589 s->bs = bs;
1590 s->aio_context = bdrv_get_aio_context(bs);
1591
1592 opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
1593 qemu_opts_absorb_qdict(opts, options, &local_err);
1594 if (local_err) {
1595 error_propagate(errp, local_err);
1596 ret = -EINVAL;
1597 goto err_no_fd;
1598 }
1599
1600 s->addr = sd_server_config(options, errp);
1601 if (!s->addr) {
1602 ret = -EINVAL;
1603 goto err_no_fd;
1604 }
1605
1606 vdi = qemu_opt_get(opts, "vdi");
1607 snap_id_str = qemu_opt_get(opts, "snap-id");
1608 snap_id = qemu_opt_get_number(opts, "snap-id", CURRENT_VDI_ID);
1609 tag = qemu_opt_get(opts, "tag");
1610
1611 if (!vdi) {
1612 error_setg(errp, "parameter 'vdi' is missing");
1613 ret = -EINVAL;
1614 goto err_no_fd;
1615 }
1616 if (strlen(vdi) >= SD_MAX_VDI_LEN) {
1617 error_setg(errp, "value of parameter 'vdi' is too long");
1618 ret = -EINVAL;
1619 goto err_no_fd;
1620 }
1621
1622 if (snap_id > UINT32_MAX) {
1623 snap_id = 0;
1624 }
1625 if (snap_id_str && !snap_id) {
1626 error_setg(errp, "'snap-id=%s' is not a valid snapshot ID",
1627 snap_id_str);
1628 ret = -EINVAL;
1629 goto err_no_fd;
1630 }
1631
1632 if (!tag) {
1633 tag = "";
1634 }
1635 if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
1636 error_setg(errp, "value of parameter 'tag' is too long");
1637 ret = -EINVAL;
1638 goto err_no_fd;
1639 }
1640
1641 QLIST_INIT(&s->inflight_aio_head);
1642 QLIST_INIT(&s->failed_aio_head);
1643 QLIST_INIT(&s->inflight_aiocb_head);
1644
1645 s->fd = get_sheep_fd(s, errp);
1646 if (s->fd < 0) {
1647 ret = s->fd;
1648 goto err_no_fd;
1649 }
1650
1651 ret = find_vdi_name(s, vdi, (uint32_t)snap_id, tag, &vid, true, errp);
1652 if (ret) {
1653 goto err;
1654 }
1655
1656 /*
1657 * QEMU block layer emulates writethrough cache as 'writeback + flush', so
1658 * we always set SD_FLAG_CMD_CACHE (writeback cache) by default.
1659 */
1660 s->cache_flags = SD_FLAG_CMD_CACHE;
1661 if (flags & BDRV_O_NOCACHE) {
1662 s->cache_flags = SD_FLAG_CMD_DIRECT;
1663 }
1664 s->discard_supported = true;
1665
1666 if (snap_id || tag[0]) {
1667 DPRINTF("%" PRIx32 " snapshot inode was open.\n", vid);
1668 s->is_snapshot = true;
1669 }
1670
1671 fd = connect_to_sdog(s, errp);
1672 if (fd < 0) {
1673 ret = fd;
1674 goto err;
1675 }
1676
1677 buf = g_malloc(SD_INODE_SIZE);
1678 ret = read_object(fd, s->bs, buf, vid_to_vdi_oid(vid),
1679 0, SD_INODE_SIZE, 0, s->cache_flags);
1680
1681 closesocket(fd);
1682
1683 if (ret) {
1684 error_setg(errp, "Can't read snapshot inode");
1685 goto err;
1686 }
1687
1688 memcpy(&s->inode, buf, sizeof(s->inode));
1689
1690 bs->total_sectors = s->inode.vdi_size / BDRV_SECTOR_SIZE;
1691 pstrcpy(s->name, sizeof(s->name), vdi);
1692 qemu_co_mutex_init(&s->lock);
1693 qemu_co_mutex_init(&s->queue_lock);
1694 qemu_co_queue_init(&s->overlapping_queue);
1695 qemu_opts_del(opts);
1696 g_free(buf);
1697 return 0;
1698
1699 err:
1700 aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
1701 false, NULL, NULL, NULL, NULL);
1702 closesocket(s->fd);
1703 err_no_fd:
1704 qemu_opts_del(opts);
1705 g_free(buf);
1706 return ret;
1707 }
1708
1709 static int sd_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue,
1710 Error **errp)
1711 {
1712 BDRVSheepdogState *s = state->bs->opaque;
1713 BDRVSheepdogReopenState *re_s;
1714 int ret = 0;
1715
1716 re_s = state->opaque = g_new0(BDRVSheepdogReopenState, 1);
1717
1718 re_s->cache_flags = SD_FLAG_CMD_CACHE;
1719 if (state->flags & BDRV_O_NOCACHE) {
1720 re_s->cache_flags = SD_FLAG_CMD_DIRECT;
1721 }
1722
1723 re_s->fd = get_sheep_fd(s, errp);
1724 if (re_s->fd < 0) {
1725 ret = re_s->fd;
1726 return ret;
1727 }
1728
1729 return ret;
1730 }
1731
1732 static void sd_reopen_commit(BDRVReopenState *state)
1733 {
1734 BDRVSheepdogReopenState *re_s = state->opaque;
1735 BDRVSheepdogState *s = state->bs->opaque;
1736
1737 if (s->fd) {
1738 aio_set_fd_handler(s->aio_context, s->fd, false,
1739 NULL, NULL, NULL, NULL);
1740 closesocket(s->fd);
1741 }
1742
1743 s->fd = re_s->fd;
1744 s->cache_flags = re_s->cache_flags;
1745
1746 g_free(state->opaque);
1747 state->opaque = NULL;
1748
1749 return;
1750 }
1751
1752 static void sd_reopen_abort(BDRVReopenState *state)
1753 {
1754 BDRVSheepdogReopenState *re_s = state->opaque;
1755 BDRVSheepdogState *s = state->bs->opaque;
1756
1757 if (re_s == NULL) {
1758 return;
1759 }
1760
1761 if (re_s->fd) {
1762 aio_set_fd_handler(s->aio_context, re_s->fd, false,
1763 NULL, NULL, NULL, NULL);
1764 closesocket(re_s->fd);
1765 }
1766
1767 g_free(state->opaque);
1768 state->opaque = NULL;
1769
1770 return;
1771 }
1772
1773 static int do_sd_create(BDRVSheepdogState *s, uint32_t *vdi_id, int snapshot,
1774 Error **errp)
1775 {
1776 SheepdogVdiReq hdr;
1777 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
1778 int fd, ret;
1779 unsigned int wlen, rlen = 0;
1780 char buf[SD_MAX_VDI_LEN];
1781
1782 fd = connect_to_sdog(s, errp);
1783 if (fd < 0) {
1784 return fd;
1785 }
1786
1787 /* FIXME: would it be better to fail (e.g., return -EIO) when filename
1788 * does not fit in buf? For now, just truncate and avoid buffer overrun.
1789 */
1790 memset(buf, 0, sizeof(buf));
1791 pstrcpy(buf, sizeof(buf), s->name);
1792
1793 memset(&hdr, 0, sizeof(hdr));
1794 hdr.opcode = SD_OP_NEW_VDI;
1795 hdr.base_vdi_id = s->inode.vdi_id;
1796
1797 wlen = SD_MAX_VDI_LEN;
1798
1799 hdr.flags = SD_FLAG_CMD_WRITE;
1800 hdr.snapid = snapshot;
1801
1802 hdr.data_length = wlen;
1803 hdr.vdi_size = s->inode.vdi_size;
1804 hdr.copy_policy = s->inode.copy_policy;
1805 hdr.copies = s->inode.nr_copies;
1806 hdr.block_size_shift = s->inode.block_size_shift;
1807
1808 ret = do_req(fd, NULL, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
1809
1810 closesocket(fd);
1811
1812 if (ret) {
1813 error_setg_errno(errp, -ret, "create failed");
1814 return ret;
1815 }
1816
1817 if (rsp->result != SD_RES_SUCCESS) {
1818 error_setg(errp, "%s, %s", sd_strerror(rsp->result), s->inode.name);
1819 return -EIO;
1820 }
1821
1822 if (vdi_id) {
1823 *vdi_id = rsp->vdi_id;
1824 }
1825
1826 return 0;
1827 }
1828
1829 static int sd_prealloc(BlockDriverState *bs, int64_t old_size, int64_t new_size,
1830 Error **errp)
1831 {
1832 BlockBackend *blk = NULL;
1833 BDRVSheepdogState *base = bs->opaque;
1834 unsigned long buf_size;
1835 uint32_t idx, max_idx;
1836 uint32_t object_size;
1837 void *buf = NULL;
1838 int ret;
1839
1840 blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE,
1841 BLK_PERM_ALL);
1842
1843 ret = blk_insert_bs(blk, bs, errp);
1844 if (ret < 0) {
1845 goto out_with_err_set;
1846 }
1847
1848 blk_set_allow_write_beyond_eof(blk, true);
1849
1850 object_size = (UINT32_C(1) << base->inode.block_size_shift);
1851 buf_size = MIN(object_size, SD_DATA_OBJ_SIZE);
1852 buf = g_malloc0(buf_size);
1853
1854 max_idx = DIV_ROUND_UP(new_size, buf_size);
1855
1856 for (idx = old_size / buf_size; idx < max_idx; idx++) {
1857 /*
1858 * The created image can be a cloned image, so we need to read
1859 * data from the source image.
1860 */
1861 ret = blk_pread(blk, idx * buf_size, buf, buf_size);
1862 if (ret < 0) {
1863 goto out;
1864 }
1865 ret = blk_pwrite(blk, idx * buf_size, buf, buf_size, 0);
1866 if (ret < 0) {
1867 goto out;
1868 }
1869 }
1870
1871 ret = 0;
1872 out:
1873 if (ret < 0) {
1874 error_setg_errno(errp, -ret, "Can't pre-allocate");
1875 }
1876 out_with_err_set:
1877 if (blk) {
1878 blk_unref(blk);
1879 }
1880 g_free(buf);
1881
1882 return ret;
1883 }
1884
1885 /*
1886 * Sheepdog supports two kinds of redundancy, full replication and erasure
1887 * coding.
1888 *
1889 * # create a fully replicated vdi with x copies
1890 * -o redundancy=x (1 <= x <= SD_MAX_COPIES)
1891 *
1892 * # create an erasure-coded vdi with x data strips and y parity strips
1893 * -o redundancy=x:y (x must be one of {2,4,8,16} and 1 <= y < SD_EC_MAX_STRIP)
1894 */
1895 static int parse_redundancy(BDRVSheepdogState *s, const char *opt)
1896 {
1897 struct SheepdogInode *inode = &s->inode;
1898 const char *n1, *n2;
1899 long copy, parity;
1900 char p[10];
1901
1902 pstrcpy(p, sizeof(p), opt);
1903 n1 = strtok(p, ":");
1904 n2 = strtok(NULL, ":");
1905
1906 if (!n1) {
1907 return -EINVAL;
1908 }
1909
1910 copy = strtol(n1, NULL, 10);
1911 /* FIXME fix error checking by switching to qemu_strtol() */
1912 if (copy > SD_MAX_COPIES || copy < 1) {
1913 return -EINVAL;
1914 }
1915 if (!n2) {
1916 inode->copy_policy = 0;
1917 inode->nr_copies = copy;
1918 return 0;
1919 }
1920
1921 if (copy != 2 && copy != 4 && copy != 8 && copy != 16) {
1922 return -EINVAL;
1923 }
1924
1925 parity = strtol(n2, NULL, 10);
1926 /* FIXME fix error checking by switching to qemu_strtol() */
1927 if (parity >= SD_EC_MAX_STRIP || parity < 1) {
1928 return -EINVAL;
1929 }
1930
1931 /*
1932 * copy_policy keeps the data strip count (divided by 2, since 4 bits
1933 * cannot represent 16) in its upper 4 bits and the parity count in the lower 4.
1934 */
1935 inode->copy_policy = ((copy / 2) << 4) + parity;
1936 inode->nr_copies = copy + parity;
1937
1938 return 0;
1939 }
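/*
 * Illustrative encodings (derived from the code above):
 *   "-o redundancy=3"   -> copy_policy = 0,    nr_copies = 3 (full replication)
 *   "-o redundancy=4:2" -> copy_policy = 0x22, nr_copies = 6 (4 data + 2 parity)
 */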
1940
1941 static int parse_block_size_shift(BDRVSheepdogState *s, QemuOpts *opt)
1942 {
1943 struct SheepdogInode *inode = &s->inode;
1944 uint64_t object_size;
1945 int obj_order;
1946
1947 object_size = qemu_opt_get_size_del(opt, BLOCK_OPT_OBJECT_SIZE, 0);
1948 if (object_size) {
1949 if ((object_size - 1) & object_size) { /* not a power of 2? */
1950 return -EINVAL;
1951 }
1952 obj_order = ctz32(object_size);
1953 if (obj_order < 20 || obj_order > 31) {
1954 return -EINVAL;
1955 }
1956 inode->block_size_shift = (uint8_t)obj_order;
1957 }
1958
1959 return 0;
1960 }
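/*
 * Illustrative values: object_size=1M gives obj_order = 20 and hence
 * block_size_shift = 20; object_size=3M is rejected (not a power of two),
 * as is 512K (order 19 is below the allowed minimum of 20).
 */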
1961
1962 static int coroutine_fn sd_co_create_opts(const char *filename, QemuOpts *opts,
1963 Error **errp)
1964 {
1965 Error *err = NULL;
1966 int ret = 0;
1967 uint32_t vid = 0;
1968 char *backing_file = NULL;
1969 char *buf = NULL;
1970 BDRVSheepdogState *s;
1971 SheepdogConfig cfg;
1972 uint64_t max_vdi_size;
1973 bool prealloc = false;
1974
1975 s = g_new0(BDRVSheepdogState, 1);
1976
1977 if (strstr(filename, "://")) {
1978 sd_parse_uri(&cfg, filename, &err);
1979 } else {
1980 parse_vdiname(&cfg, filename, &err);
1981 }
1982 if (err) {
1983 error_propagate(errp, err);
1984 goto out;
1985 }
1986
1987 buf = cfg.port ? g_strdup_printf("%d", cfg.port) : NULL;
1988 s->addr = sd_socket_address(cfg.path, cfg.host, buf);
1989 g_free(buf);
1990 strcpy(s->name, cfg.vdi);
1991 sd_config_done(&cfg);
1992
1993 s->inode.vdi_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
1994 BDRV_SECTOR_SIZE);
1995 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
1996 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
1997 if (!buf || !strcmp(buf, "off")) {
1998 prealloc = false;
1999 } else if (!strcmp(buf, "full")) {
2000 prealloc = true;
2001 } else {
2002 error_setg(errp, "Invalid preallocation mode: '%s'", buf);
2003 ret = -EINVAL;
2004 goto out;
2005 }
2006
2007 g_free(buf);
2008 buf = qemu_opt_get_del(opts, BLOCK_OPT_REDUNDANCY);
2009 if (buf) {
2010 ret = parse_redundancy(s, buf);
2011 if (ret < 0) {
2012 error_setg(errp, "Invalid redundancy mode: '%s'", buf);
2013 goto out;
2014 }
2015 }
2016 ret = parse_block_size_shift(s, opts);
2017 if (ret < 0) {
2018 error_setg(errp, "Invalid object_size."
2019 " obect_size needs to be power of 2"
2020 " and be limited from 2^20 to 2^31");
2021 goto out;
2022 }
2023
2024 if (backing_file) {
2025 BlockBackend *blk;
2026 BDRVSheepdogState *base;
2027 BlockDriver *drv;
2028
2029 /* Currently, only Sheepdog backing image is supported. */
2030 drv = bdrv_find_protocol(backing_file, true, NULL);
2031 if (!drv || strcmp(drv->protocol_name, "sheepdog") != 0) {
2032 error_setg(errp, "backing_file must be a sheepdog image");
2033 ret = -EINVAL;
2034 goto out;
2035 }
2036
2037 blk = blk_new_open(backing_file, NULL, NULL,
2038 BDRV_O_PROTOCOL, errp);
2039 if (blk == NULL) {
2040 ret = -EIO;
2041 goto out;
2042 }
2043
2044 base = blk_bs(blk)->opaque;
2045
2046 if (!is_snapshot(&base->inode)) {
2047 error_setg(errp, "cannot clone from a non snapshot vdi");
2048 blk_unref(blk);
2049 ret = -EINVAL;
2050 goto out;
2051 }
2052 s->inode.vdi_id = base->inode.vdi_id;
2053 blk_unref(blk);
2054 }
2055
2056 s->aio_context = qemu_get_aio_context();
2057
2058 /* if block_size_shift is not specified, get cluster default value */
2059 if (s->inode.block_size_shift == 0) {
2060 SheepdogVdiReq hdr;
2061 SheepdogClusterRsp *rsp = (SheepdogClusterRsp *)&hdr;
2062 int fd;
2063 unsigned int wlen = 0, rlen = 0;
2064
2065 fd = connect_to_sdog(s, errp);
2066 if (fd < 0) {
2067 ret = fd;
2068 goto out;
2069 }
2070
2071 memset(&hdr, 0, sizeof(hdr));
2072 hdr.opcode = SD_OP_GET_CLUSTER_DEFAULT;
2073 hdr.proto_ver = SD_PROTO_VER;
2074
2075 ret = do_req(fd, NULL, (SheepdogReq *)&hdr,
2076 NULL, &wlen, &rlen);
2077 closesocket(fd);
2078 if (ret) {
2079 error_setg_errno(errp, -ret, "failed to get cluster default");
2080 goto out;
2081 }
2082 if (rsp->result == SD_RES_SUCCESS) {
2083 s->inode.block_size_shift = rsp->block_size_shift;
2084 } else {
2085 s->inode.block_size_shift = SD_DEFAULT_BLOCK_SIZE_SHIFT;
2086 }
2087 }
2088
2089 max_vdi_size = (UINT64_C(1) << s->inode.block_size_shift) * MAX_DATA_OBJS;
2090
2091 if (s->inode.vdi_size > max_vdi_size) {
2092 error_setg(errp, "An image is too large."
2093 " The maximum image size is %"PRIu64 "GB",
2094 max_vdi_size / 1024 / 1024 / 1024);
2095 ret = -EINVAL;
2096 goto out;
2097 }
2098
2099 ret = do_sd_create(s, &vid, 0, errp);
2100 if (ret) {
2101 goto out;
2102 }
2103
2104 if (prealloc) {
2105 BlockDriverState *bs;
2106 QDict *opts;
2107
2108 opts = qdict_new();
2109 qdict_put_str(opts, "driver", "sheepdog");
2110 bs = bdrv_open(filename, NULL, opts, BDRV_O_PROTOCOL | BDRV_O_RDWR,
2111 errp);
2112 if (!bs) {
2113 goto out;
2114 }
2115
2116 ret = sd_prealloc(bs, 0, s->inode.vdi_size, errp);
2117
2118 bdrv_unref(bs);
2119 }
2120 out:
2121 g_free(backing_file);
2122 g_free(buf);
2123 g_free(s);
2124 return ret;
2125 }
2126
2127 static void sd_close(BlockDriverState *bs)
2128 {
2129 Error *local_err = NULL;
2130 BDRVSheepdogState *s = bs->opaque;
2131 SheepdogVdiReq hdr;
2132 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
2133 unsigned int wlen, rlen = 0;
2134 int fd, ret;
2135
2136 DPRINTF("%s\n", s->name);
2137
2138 fd = connect_to_sdog(s, &local_err);
2139 if (fd < 0) {
2140 error_report_err(local_err);
2141 return;
2142 }
2143
2144 memset(&hdr, 0, sizeof(hdr));
2145
2146 hdr.opcode = SD_OP_RELEASE_VDI;
2147 hdr.type = LOCK_TYPE_NORMAL;
2148 hdr.base_vdi_id = s->inode.vdi_id;
2149 wlen = strlen(s->name) + 1;
2150 hdr.data_length = wlen;
2151 hdr.flags = SD_FLAG_CMD_WRITE;
2152
2153 ret = do_req(fd, s->bs, (SheepdogReq *)&hdr,
2154 s->name, &wlen, &rlen);
2155
2156 closesocket(fd);
2157
2158 if (!ret && rsp->result != SD_RES_SUCCESS &&
2159 rsp->result != SD_RES_VDI_NOT_LOCKED) {
2160 error_report("%s, %s", sd_strerror(rsp->result), s->name);
2161 }
2162
2163 aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
2164 false, NULL, NULL, NULL, NULL);
2165 closesocket(s->fd);
2166 qapi_free_SocketAddress(s->addr);
2167 }
2168
2169 static int64_t sd_getlength(BlockDriverState *bs)
2170 {
2171 BDRVSheepdogState *s = bs->opaque;
2172
2173 return s->inode.vdi_size;
2174 }
2175
2176 static int sd_truncate(BlockDriverState *bs, int64_t offset,
2177 PreallocMode prealloc, Error **errp)
2178 {
2179 BDRVSheepdogState *s = bs->opaque;
2180 int ret, fd;
2181 unsigned int datalen;
2182 uint64_t max_vdi_size;
2183 int64_t old_size = s->inode.vdi_size;
2184
2185 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_FULL) {
2186 error_setg(errp, "Unsupported preallocation mode '%s'",
2187 PreallocMode_str(prealloc));
2188 return -ENOTSUP;
2189 }
2190
2191 max_vdi_size = (UINT64_C(1) << s->inode.block_size_shift) * MAX_DATA_OBJS;
2192 if (offset < old_size) {
2193 error_setg(errp, "shrinking is not supported");
2194 return -EINVAL;
2195 } else if (offset > max_vdi_size) {
2196 error_setg(errp, "too big image size");
2197 return -EINVAL;
2198 }
2199
2200 fd = connect_to_sdog(s, errp);
2201 if (fd < 0) {
2202 return fd;
2203 }
2204
2205 /* we don't need to update the entire object */
2206 datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
2207 s->inode.vdi_size = offset;
2208 ret = write_object(fd, s->bs, (char *)&s->inode,
2209 vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies,
2210 datalen, 0, false, s->cache_flags);
2211 close(fd);
2212
2213 if (ret < 0) {
2214 error_setg_errno(errp, -ret, "failed to update an inode");
2215 return ret;
2216 }
2217
2218 if (prealloc == PREALLOC_MODE_FULL) {
2219 ret = sd_prealloc(bs, old_size, offset, errp);
2220 if (ret < 0) {
2221 return ret;
2222 }
2223 }
2224
2225 return 0;
2226 }
2227
2228 /*
2229 * This function is called after writing data objects. If we need to
2230 * update metadata, this sends a write request to the vdi object.
2231 */
2232 static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
2233 {
2234 BDRVSheepdogState *s = acb->s;
2235 struct iovec iov;
2236 AIOReq *aio_req;
2237 uint32_t offset, data_len, mn, mx;
2238
2239 mn = acb->min_dirty_data_idx;
2240 mx = acb->max_dirty_data_idx;
2241 if (mn <= mx) {
2242 /* we need to update the vdi object. */
2243 ++acb->nr_pending;
2244 offset = sizeof(s->inode) - sizeof(s->inode.data_vdi_id) +
2245 mn * sizeof(s->inode.data_vdi_id[0]);
2246 data_len = (mx - mn + 1) * sizeof(s->inode.data_vdi_id[0]);
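/* Only the dirty slice of data_vdi_id[], indices mn through mx, is
 * written back to the inode object, not the whole inode. */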
2247
2248 acb->min_dirty_data_idx = UINT32_MAX;
2249 acb->max_dirty_data_idx = 0;
2250
2251 iov.iov_base = &s->inode;
2252 iov.iov_len = sizeof(s->inode);
2253 aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
2254 data_len, offset, 0, false, 0, offset);
2255 add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
2256 if (--acb->nr_pending) {
2257 qemu_coroutine_yield();
2258 }
2259 }
2260 }
2261
2262 /* Delete current working VDI on the snapshot chain */
2263 static bool sd_delete(BDRVSheepdogState *s)
2264 {
2265 Error *local_err = NULL;
2266 unsigned int wlen = SD_MAX_VDI_LEN, rlen = 0;
2267 SheepdogVdiReq hdr = {
2268 .opcode = SD_OP_DEL_VDI,
2269 .base_vdi_id = s->inode.vdi_id,
2270 .data_length = wlen,
2271 .flags = SD_FLAG_CMD_WRITE,
2272 };
2273 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
2274 int fd, ret;
2275
2276 fd = connect_to_sdog(s, &local_err);
2277 if (fd < 0) {
2278 error_report_err(local_err);
2279 return false;
2280 }
2281
2282 ret = do_req(fd, s->bs, (SheepdogReq *)&hdr,
2283 s->name, &wlen, &rlen);
2284 closesocket(fd);
2285 if (ret) {
2286 return false;
2287 }
2288 switch (rsp->result) {
2289 case SD_RES_NO_VDI:
2290 error_report("%s was already deleted", s->name);
2291 /* fall through */
2292 case SD_RES_SUCCESS:
2293 break;
2294 default:
2295 error_report("%s, %s", sd_strerror(rsp->result), s->name);
2296 return false;
2297 }
2298
2299 return true;
2300 }
2301
2302 /*
2303 * Create a writable VDI from a snapshot
2304 */
2305 static int sd_create_branch(BDRVSheepdogState *s)
2306 {
2307 Error *local_err = NULL;
2308 int ret, fd;
2309 uint32_t vid;
2310 char *buf;
2311 bool deleted;
2312
2313 DPRINTF("%" PRIx32 " is snapshot.\n", s->inode.vdi_id);
2314
2315 buf = g_malloc(SD_INODE_SIZE);
2316
2317 /*
2318 * Even if deletion fails, we will just create an extra snapshot based on
2319 * the working VDI which was supposed to be deleted, so there is no
2320 * need to bail out on a failed deletion.
2321 */
2322 deleted = sd_delete(s);
2323 ret = do_sd_create(s, &vid, !deleted, &local_err);
2324 if (ret) {
2325 error_report_err(local_err);
2326 goto out;
2327 }
2328
2329 DPRINTF("%" PRIx32 " is created.\n", vid);
2330
2331 fd = connect_to_sdog(s, &local_err);
2332 if (fd < 0) {
2333 error_report_err(local_err);
2334 ret = fd;
2335 goto out;
2336 }
2337
2338 ret = read_object(fd, s->bs, buf, vid_to_vdi_oid(vid),
2339 s->inode.nr_copies, SD_INODE_SIZE, 0, s->cache_flags);
2340
2341 closesocket(fd);
2342
2343 if (ret < 0) {
2344 goto out;
2345 }
2346
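/* Switch the in-memory inode to the newly created working VDI. */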
2347 memcpy(&s->inode, buf, sizeof(s->inode));
2348
2349 s->is_snapshot = false;
2350 ret = 0;
2351 DPRINTF("%" PRIx32 " was newly created.\n", s->inode.vdi_id);
2352
2353 out:
2354 g_free(buf);
2355
2356 return ret;
2357 }
2358
2359 /*
2360 * Send I/O requests to the server.
2361 *
2362 * This function sends requests to the server, links the requests to
2363 * the inflight_list in BDRVSheepdogState, and yields without
2364 * waiting for the responses. The responses are received in the
2365 * `aio_read_response' function, which is called from the main loop as
2366 * an fd handler.
2367 *
2368 * The overall result is recorded in acb->ret: 0 on success, or a
2369 * negative errno value on error.
2370 */
2371 static void coroutine_fn sd_co_rw_vector(SheepdogAIOCB *acb)
2372 {
2373 int ret = 0;
2374 unsigned long len, done = 0, total = acb->nb_sectors * BDRV_SECTOR_SIZE;
2375 unsigned long idx;
2376 uint32_t object_size;
2377 uint64_t oid;
2378 uint64_t offset;
2379 BDRVSheepdogState *s = acb->s;
2380 SheepdogInode *inode = &s->inode;
2381 AIOReq *aio_req;
2382
2383 if (acb->aiocb_type == AIOCB_WRITE_UDATA && s->is_snapshot) {
2384 /*
2385 * If we have opened a snapshot VDI, Sheepdog creates a new
2386 * writable VDI on the first write operation.
2387 */
2388 ret = sd_create_branch(s);
2389 if (ret) {
2390 acb->ret = -EIO;
2391 return;
2392 }
2393 }
2394
2395 object_size = (UINT32_C(1) << inode->block_size_shift);
2396 idx = acb->sector_num * BDRV_SECTOR_SIZE / object_size;
2397 offset = (acb->sector_num * BDRV_SECTOR_SIZE) % object_size;
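/* idx is the first data object touched by this request; offset is the
 * byte offset within that object. The request is split per object in
 * the loop below. */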
2398
2399 /*
2400 * Make sure we don't free the aiocb before we are done with all requests.
2401 * This additional reference is dropped at the end of this function.
2402 */
2403 acb->nr_pending++;
2404
2405 while (done != total) {
2406 uint8_t flags = 0;
2407 uint64_t old_oid = 0;
2408 bool create = false;
2409
2410 oid = vid_to_data_oid(inode->data_vdi_id[idx], idx);
2411
2412 len = MIN(total - done, object_size - offset);
2413
2414 switch (acb->aiocb_type) {
2415 case AIOCB_READ_UDATA:
2416 if (!inode->data_vdi_id[idx]) {
2417 qemu_iovec_memset(acb->qiov, done, 0, len);
2418 goto done;
2419 }
2420 break;
2421 case AIOCB_WRITE_UDATA:
2422 if (!inode->data_vdi_id[idx]) {
2423 create = true;
2424 } else if (!is_data_obj_writable(inode, idx)) {
2425 /* Copy-On-Write */
2426 create = true;
2427 old_oid = oid;
2428 flags = SD_FLAG_CMD_COW;
2429 }
2430 break;
2431 case AIOCB_DISCARD_OBJ:
2432 /*
2433 * We discard the object only when the whole object is both
2434 * 1) allocated and 2) trimmed. Otherwise, simply skip it.
2435 */
2436 if (len != object_size || inode->data_vdi_id[idx] == 0) {
2437 goto done;
2438 }
2439 break;
2440 default:
2441 break;
2442 }
2443
2444 if (create) {
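/* Newly created objects (including copy-on-write copies) belong to
 * the working VDI, so their oid is derived from inode->vdi_id. */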
2445 DPRINTF("update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld\n",
2446 inode->vdi_id, oid,
2447 vid_to_data_oid(inode->data_vdi_id[idx], idx), idx);
2448 oid = vid_to_data_oid(inode->vdi_id, idx);
2449 DPRINTF("new oid %" PRIx64 "\n", oid);
2450 }
2451
2452 aio_req = alloc_aio_req(s, acb, oid, len, offset, flags, create,
2453 old_oid,
2454 acb->aiocb_type == AIOCB_DISCARD_OBJ ?
2455 0 : done);
2456 add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
2457 acb->aiocb_type);
2458 done:
2459 offset = 0;
2460 idx++;
2461 done += len;
2462 }
2463 if (--acb->nr_pending) {
2464 qemu_coroutine_yield();
2465 }
2466 }
2467
2468 static void sd_aio_complete(SheepdogAIOCB *acb)
2469 {
2470 BDRVSheepdogState *s;
2471 if (acb->aiocb_type == AIOCB_FLUSH_CACHE) {
2472 return;
2473 }
2474
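/* Drop this request from the list of in-flight AIOCBs and wake up any
 * coroutines that were waiting on an overlapping request. */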
2475 s = acb->s;
2476 qemu_co_mutex_lock(&s->queue_lock);
2477 QLIST_REMOVE(acb, aiocb_siblings);
2478 qemu_co_queue_restart_all(&s->overlapping_queue);
2479 qemu_co_mutex_unlock(&s->queue_lock);
2480 }
2481
2482 static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
2483 int nb_sectors, QEMUIOVector *qiov)
2484 {
2485 SheepdogAIOCB acb;
2486 int ret;
2487 int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE;
2488 BDRVSheepdogState *s = bs->opaque;
2489
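/* A write beyond the current VDI size implicitly grows the image first. */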
2490 if (offset > s->inode.vdi_size) {
2491 ret = sd_truncate(bs, offset, PREALLOC_MODE_OFF, NULL);
2492 if (ret < 0) {
2493 return ret;
2494 }
2495 }
2496
2497 sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_WRITE_UDATA);
2498 sd_co_rw_vector(&acb);
2499 sd_write_done(&acb);
2500 sd_aio_complete(&acb);
2501
2502 return acb.ret;
2503 }
2504
2505 static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
2506 int nb_sectors, QEMUIOVector *qiov)
2507 {
2508 SheepdogAIOCB acb;
2509 BDRVSheepdogState *s = bs->opaque;
2510
2511 sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_READ_UDATA);
2512 sd_co_rw_vector(&acb);
2513 sd_aio_complete(&acb);
2514
2515 return acb.ret;
2516 }
2517
2518 static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
2519 {
2520 BDRVSheepdogState *s = bs->opaque;
2521 SheepdogAIOCB acb;
2522 AIOReq *aio_req;
2523
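/* A flush is only meaningful in writeback cache mode; otherwise writes
 * bypass the object cache and need no explicit flush. */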
2524 if (s->cache_flags != SD_FLAG_CMD_CACHE) {
2525 return 0;
2526 }
2527
2528 sd_aio_setup(&acb, s, NULL, 0, 0, AIOCB_FLUSH_CACHE);
2529
2530 acb.nr_pending++;
2531 aio_req = alloc_aio_req(s, &acb, vid_to_vdi_oid(s->inode.vdi_id),
2532 0, 0, 0, false, 0, 0);
2533 add_aio_request(s, aio_req, NULL, 0, acb.aiocb_type);
2534
2535 if (--acb.nr_pending) {
2536 qemu_coroutine_yield();
2537 }
2538
2539 sd_aio_complete(&acb);
2540 return acb.ret;
2541 }
2542
2543 static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
2544 {
2545 Error *local_err = NULL;
2546 BDRVSheepdogState *s = bs->opaque;
2547 int ret, fd;
2548 uint32_t new_vid;
2549 SheepdogInode *inode;
2550 unsigned int datalen;
2551
2552 DPRINTF("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " "
2553 "is_snapshot %d\n", sn_info->name, sn_info->id_str,
2554 s->name, sn_info->vm_state_size, s->is_snapshot);
2555
2556 if (s->is_snapshot) {
2557 error_report("You can't create a snapshot of a snapshot VDI, "
2558 "%s (%" PRIu32 ").", s->name, s->inode.vdi_id);
2559
2560 return -EINVAL;
2561 }
2562
2563 DPRINTF("%s %s\n", sn_info->name, sn_info->id_str);
2564
2565 s->inode.vm_state_size = sn_info->vm_state_size;
2566 s->inode.vm_clock_nsec = sn_info->vm_clock_nsec;
2567 /* It appears that inode.tag does not require a NUL terminator,
2568 * which means this use of strncpy is ok.
2569 */
2570 strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag));
2571 /* we don't need to update the entire object */
2572 datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
2573 inode = g_malloc(datalen);
2574
2575 /* refresh inode. */
2576 fd = connect_to_sdog(s, &local_err);
2577 if (fd < 0) {
2578 error_report_err(local_err);
2579 ret = fd;
2580 goto cleanup;
2581 }
2582
2583 ret = write_object(fd, s->bs, (char *)&s->inode,
2584 vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies,
2585 datalen, 0, false, s->cache_flags);
2586 if (ret < 0) {
2587 error_report("failed to write snapshot's inode.");
2588 goto cleanup;
2589 }
2590
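/* do_sd_create() allocates a new VDI on top of the snapshot just
 * written; it becomes the working VDI once its inode is read back
 * below. */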
2591 ret = do_sd_create(s, &new_vid, 1, &local_err);
2592 if (ret < 0) {
2593 error_reportf_err(local_err,
2594 "failed to create inode for snapshot: ");
2595 goto cleanup;
2596 }
2597
2598 ret = read_object(fd, s->bs, (char *)inode,
2599 vid_to_vdi_oid(new_vid), s->inode.nr_copies, datalen, 0,
2600 s->cache_flags);
2601
2602 if (ret < 0) {
2603 error_report("failed to read new inode info. %s", strerror(errno));
2604 goto cleanup;
2605 }
2606
2607 memcpy(&s->inode, inode, datalen);
2608 DPRINTF("s->inode: name %s snap_id %x oid %x\n",
2609 s->inode.name, s->inode.snap_id, s->inode.vdi_id);
2610
2611 cleanup:
2612 g_free(inode);
2613 closesocket(fd);
2614 return ret;
2615 }
2616
2617 /*
2618 * We implement the rollback (loadvm) operation to the specified snapshot by
2619 * 1) switching to the snapshot,
2620 * 2) relying on sd_create_branch to delete the working VDI, and
2621 * 3) creating a new working VDI based on the specified snapshot.
2622 */
2623 static int sd_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
2624 {
2625 BDRVSheepdogState *s = bs->opaque;
2626 BDRVSheepdogState *old_s;
2627 char tag[SD_MAX_VDI_TAG_LEN];
2628 uint32_t snapid = 0;
2629 int ret;
2630
2631 if (!sd_parse_snapid_or_tag(snapshot_id, &snapid, tag)) {
2632 return -EINVAL;
2633 }
2634
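/* Keep a copy of the current driver state so it can be restored if
 * the rollback fails below. */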
2635 old_s = g_new(BDRVSheepdogState, 1);
2636
2637 memcpy(old_s, s, sizeof(BDRVSheepdogState));
2638
2639 ret = reload_inode(s, snapid, tag);
2640 if (ret) {
2641 goto out;
2642 }
2643
2644 ret = sd_create_branch(s);
2645 if (ret) {
2646 goto out;
2647 }
2648
2649 g_free(old_s);
2650
2651 return 0;
2652 out:
2653 /* recover bdrv_sd_state */
2654 memcpy(s, old_s, sizeof(BDRVSheepdogState));
2655 g_free(old_s);
2656
2657 error_report("failed to open. recover old bdrv_sd_state.");
2658
2659 return ret;
2660 }
2661
2662 #define NR_BATCHED_DISCARD 128
2663
2664 static int remove_objects(BDRVSheepdogState *s, Error **errp)
2665 {
2666 int fd, i = 0, nr_objs = 0;
2667 int ret;
2668 SheepdogInode *inode = &s->inode;
2669
2670 fd = connect_to_sdog(s, errp);
2671 if (fd < 0) {
2672 return fd;
2673 }
2674
2675 nr_objs = count_data_objs(inode);
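/* Clear allocated entries in data_vdi_id[] in batches of up to
 * NR_BATCHED_DISCARD, writing each modified range back to the inode
 * object. */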
2676 while (i < nr_objs) {
2677 int start_idx, nr_filled_idx;
2678
2679 while (i < nr_objs && !inode->data_vdi_id[i]) {
2680 i++;
2681 }
2682 start_idx = i;
2683
2684 nr_filled_idx = 0;
2685 while (i < nr_objs && nr_filled_idx < NR_BATCHED_DISCARD) {
2686 if (inode->data_vdi_id[i]) {
2687 inode->data_vdi_id[i] = 0;
2688 nr_filled_idx++;
2689 }
2690
2691 i++;
2692 }
2693
2694 ret = write_object(fd, s->bs,
2695 (char *)&inode->data_vdi_id[start_idx],
2696 vid_to_vdi_oid(s->inode.vdi_id), inode->nr_copies,
2697 (i - start_idx) * sizeof(uint32_t),
2698 offsetof(struct SheepdogInode,
2699 data_vdi_id[start_idx]),
2700 false, s->cache_flags);
2701 if (ret < 0) {
2702 error_setg(errp, "Failed to discard snapshot inode");
2703 goto out;
2704 }
2705 }
2706
2707 ret = 0;
2708 out:
2709 closesocket(fd);
2710 return ret;
2711 }
2712
2713 static int sd_snapshot_delete(BlockDriverState *bs,
2714 const char *snapshot_id,
2715 const char *name,
2716 Error **errp)
2717 {
2718 /*
2719 * FIXME should delete the snapshot matching both @snapshot_id and
2720 * @name, but @name not used here
2721 */
2722 unsigned long snap_id = 0;
2723 char snap_tag[SD_MAX_VDI_TAG_LEN];
2724 int fd, ret;
2725 char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN];
2726 BDRVSheepdogState *s = bs->opaque;
2727 unsigned int wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN, rlen = 0;
2728 uint32_t vid;
2729 SheepdogVdiReq hdr = {
2730 .opcode = SD_OP_DEL_VDI,
2731 .data_length = wlen,
2732 .flags = SD_FLAG_CMD_WRITE,
2733 };
2734 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
2735
2736 ret = remove_objects(s, errp);
2737 if (ret) {
2738 return ret;
2739 }
2740
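/* The request payload carries the VDI name in the first SD_MAX_VDI_LEN
 * bytes, optionally followed by the snapshot tag. */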
2741 memset(buf, 0, sizeof(buf));
2742 memset(snap_tag, 0, sizeof(snap_tag));
2743 pstrcpy(buf, SD_MAX_VDI_LEN, s->name);
2744 /* TODO Use sd_parse_snapid() once this mess is cleaned up */
2745 ret = qemu_strtoul(snapshot_id, NULL, 10, &snap_id);
2746 if (ret || snap_id > UINT32_MAX) {
2747 /*
2748 * FIXME Since qemu_strtoul() returns -EINVAL when
2749 * @snapshot_id is null, @snapshot_id is mandatory. Correct
2750 * would be to require at least one of @snapshot_id and @name.
2751 */
2752 error_setg(errp, "Invalid snapshot ID: %s",
2753 snapshot_id ? snapshot_id : "<null>");
2754 return -EINVAL;
2755 }
2756
2757 if (snap_id) {
2758 hdr.snapid = (uint32_t) snap_id;
2759 } else {
2760 /* FIXME I suspect we should use @name here */
2761 /* FIXME don't truncate silently */
2762 pstrcpy(snap_tag, sizeof(snap_tag), snapshot_id);
2763 pstrcpy(buf + SD_MAX_VDI_LEN, SD_MAX_VDI_TAG_LEN, snap_tag);
2764 }
2765
2766 ret = find_vdi_name(s, s->name, snap_id, snap_tag, &vid, true, errp);
2767 if (ret) {
2768 return ret;
2769 }
2770
2771 fd = connect_to_sdog(s, errp);
2772 if (fd < 0) {
2773 return fd;
2774 }
2775
2776 ret = do_req(fd, s->bs, (SheepdogReq *)&hdr,
2777 buf, &wlen, &rlen);
2778 closesocket(fd);
2779 if (ret) {
2780 error_setg_errno(errp, -ret, "Couldn't send request to server");
2781 return ret;
2782 }
2783
2784 switch (rsp->result) {
2785 case SD_RES_NO_VDI:
2786 error_setg(errp, "Can't find the snapshot");
2787 return -ENOENT;
2788 case SD_RES_SUCCESS:
2789 break;
2790 default:
2791 error_setg(errp, "%s", sd_strerror(rsp->result));
2792 return -EIO;
2793 }
2794
2795 return 0;
2796 }
2797
2798 static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
2799 {
2800 Error *local_err = NULL;
2801 BDRVSheepdogState *s = bs->opaque;
2802 SheepdogReq req;
2803 int fd, nr = 1024, ret, max = BITS_TO_LONGS(SD_NR_VDIS) * sizeof(long);
2804 QEMUSnapshotInfo *sn_tab = NULL;
2805 unsigned wlen, rlen;
2806 int found = 0;
2807 static SheepdogInode inode;
2808 unsigned long *vdi_inuse;
2809 unsigned int start_nr;
2810 uint64_t hval;
2811 uint32_t vid;
2812
2813 vdi_inuse = g_malloc(max);
2814
2815 fd = connect_to_sdog(s, &local_err);
2816 if (fd < 0) {
2817 error_report_err(local_err);
2818 ret = fd;
2819 goto out;
2820 }
2821
2822 rlen = max;
2823 wlen = 0;
2824
2825 memset(&req, 0, sizeof(req));
2826
2827 req.opcode = SD_OP_READ_VDIS;
2828 req.data_length = max;
2829
2830 ret = do_req(fd, s->bs, &req, vdi_inuse, &wlen, &rlen);
2831
2832 closesocket(fd);
2833 if (ret) {
2834 goto out;
2835 }
2836
2837 sn_tab = g_new0(QEMUSnapshotInfo, nr);
2838
2839 /* calculate a vdi id with the hash function */
2840 hval = fnv_64a_buf(s->name, strlen(s->name), FNV1A_64_INIT);
2841 start_nr = hval & (SD_NR_VDIS - 1);
2842
2843 fd = connect_to_sdog(s, &local_err);
2844 if (fd < 0) {
2845 error_report_err(local_err);
2846 ret = fd;
2847 goto out;
2848 }
2849
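/* Sheepdog is assumed to place a VDI and its snapshots in consecutive
 * slots starting at the hashed position, so stop at the first unused
 * bit. */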
2850 for (vid = start_nr; found < nr; vid = (vid + 1) % SD_NR_VDIS) {
2851 if (!test_bit(vid, vdi_inuse)) {
2852 break;
2853 }
2854
2855 /* we don't need to read the entire object */
2856 ret = read_object(fd, s->bs, (char *)&inode,
2857 vid_to_vdi_oid(vid),
2858 0, SD_INODE_SIZE - sizeof(inode.data_vdi_id), 0,
2859 s->cache_flags);
2860
2861 if (ret) {
2862 continue;
2863 }
2864
2865 if (!strcmp(inode.name, s->name) && is_snapshot(&inode)) {
2866 sn_tab[found].date_sec = inode.snap_ctime >> 32;
2867 sn_tab[found].date_nsec = inode.snap_ctime & 0xffffffff;
2868 sn_tab[found].vm_state_size = inode.vm_state_size;
2869 sn_tab[found].vm_clock_nsec = inode.vm_clock_nsec;
2870
2871 snprintf(sn_tab[found].id_str, sizeof(sn_tab[found].id_str),
2872 "%" PRIu32, inode.snap_id);
2873 pstrcpy(sn_tab[found].name,
2874 MIN(sizeof(sn_tab[found].name), sizeof(inode.tag)),
2875 inode.tag);
2876 found++;
2877 }
2878 }
2879
2880 closesocket(fd);
2881 out:
2882 *psn_tab = sn_tab;
2883
2884 g_free(vdi_inuse);
2885
2886 if (ret < 0) {
2887 return ret;
2888 }
2889
2890 return found;
2891 }
2892
2893 static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data,
2894 int64_t pos, int size, int load)
2895 {
2896 Error *local_err = NULL;
2897 bool create;
2898 int fd, ret = 0, remaining = size;
2899 unsigned int data_len;
2900 uint64_t vmstate_oid;
2901 uint64_t offset;
2902 uint32_t vdi_index;
2903 uint32_t vdi_id = load ? s->inode.parent_vdi_id : s->inode.vdi_id;
2904 uint32_t object_size = (UINT32_C(1) << s->inode.block_size_shift);
2905
2906 fd = connect_to_sdog(s, &local_err);
2907 if (fd < 0) {
2908 error_report_err(local_err);
2909 return fd;
2910 }
2911
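/* VM state lives in dedicated vmstate objects; split the transfer on
 * object_size boundaries and handle one object per iteration. */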
2912 while (remaining) {
2913 vdi_index = pos / object_size;
2914 offset = pos % object_size;
2915
2916 data_len = MIN(remaining, object_size - offset);
2917
2918 vmstate_oid = vid_to_vmstate_oid(vdi_id, vdi_index);
2919
2920 create = (offset == 0);
2921 if (load) {
2922 ret = read_object(fd, s->bs, (char *)data, vmstate_oid,
2923 s->inode.nr_copies, data_len, offset,
2924 s->cache_flags);
2925 } else {
2926 ret = write_object(fd, s->bs, (char *)data, vmstate_oid,
2927 s->inode.nr_copies, data_len, offset, create,
2928 s->cache_flags);
2929 }
2930
2931 if (ret < 0) {
2932 error_report("failed to save vmstate %s", strerror(errno));
2933 goto cleanup;
2934 }
2935
2936 pos += data_len;
2937 data += data_len;
2938 remaining -= data_len;
2939 }
2940 ret = size;
2941 cleanup:
2942 closesocket(fd);
2943 return ret;
2944 }
2945
2946 static int sd_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
2947 int64_t pos)
2948 {
2949 BDRVSheepdogState *s = bs->opaque;
2950 void *buf;
2951 int ret;
2952
2953 buf = qemu_blockalign(bs, qiov->size);
2954 qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
2955 ret = do_load_save_vmstate(s, (uint8_t *) buf, pos, qiov->size, 0);
2956 qemu_vfree(buf);
2957
2958 return ret;
2959 }
2960
2961 static int sd_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
2962 int64_t pos)
2963 {
2964 BDRVSheepdogState *s = bs->opaque;
2965 void *buf;
2966 int ret;
2967
2968 buf = qemu_blockalign(bs, qiov->size);
2969 ret = do_load_save_vmstate(s, buf, pos, qiov->size, 1);
2970 qemu_iovec_from_buf(qiov, 0, buf, qiov->size);
2971 qemu_vfree(buf);
2972
2973 return ret;
2974 }
2975
2976
2977 static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
2978 int bytes)
2979 {
2980 SheepdogAIOCB acb;
2981 BDRVSheepdogState *s = bs->opaque;
2982 QEMUIOVector discard_iov;
2983 struct iovec iov;
2984 uint32_t zero = 0;
2985
2986 if (!s->discard_supported) {
2987 return 0;
2988 }
2989
2990 memset(&discard_iov, 0, sizeof(discard_iov));
2991 memset(&iov, 0, sizeof(iov));
2992 iov.iov_base = &zero;
2993 iov.iov_len = sizeof(zero);
2994 discard_iov.iov = &iov;
2995 discard_iov.niov = 1;
2996 if (!QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE)) {
2997 return -ENOTSUP;
2998 }
2999 sd_aio_setup(&acb, s, &discard_iov, offset >> BDRV_SECTOR_BITS,
3000 bytes >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ);
3001 sd_co_rw_vector(&acb);
3002 sd_aio_complete(&acb);
3003
3004 return acb.ret;
3005 }
3006
3007 static coroutine_fn int
3008 sd_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
3009 int64_t bytes, int64_t *pnum, int64_t *map,
3010 BlockDriverState **file)
3011 {
3012 BDRVSheepdogState *s = bs->opaque;
3013 SheepdogInode *inode = &s->inode;
3014 uint32_t object_size = (UINT32_C(1) << inode->block_size_shift);
3015 unsigned long start = offset / object_size,
3016 end = DIV_ROUND_UP(offset + bytes, object_size);
3017 unsigned long idx;
3018 *map = offset;
3019 int ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
3020
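/* Find how many consecutive objects starting at 'start' share the
 * allocation state of the first one. */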
3021 for (idx = start; idx < end; idx++) {
3022 if (inode->data_vdi_id[idx] == 0) {
3023 break;
3024 }
3025 }
3026 if (idx == start) {
3027 /* Get the longest run of unallocated objects */
3028 ret = 0;
3029 for (idx = start + 1; idx < end; idx++) {
3030 if (inode->data_vdi_id[idx] != 0) {
3031 break;
3032 }
3033 }
3034 }
3035
3036 *pnum = (idx - start) * object_size;
3037 if (*pnum > bytes) {
3038 *pnum = bytes;
3039 }
3040 if (ret > 0 && ret & BDRV_BLOCK_OFFSET_VALID) {
3041 *file = bs;
3042 }
3043 return ret;
3044 }
3045
3046 static int64_t sd_get_allocated_file_size(BlockDriverState *bs)
3047 {
3048 BDRVSheepdogState *s = bs->opaque;
3049 SheepdogInode *inode = &s->inode;
3050 uint32_t object_size = (UINT32_C(1) << inode->block_size_shift);
3051 unsigned long i, last = DIV_ROUND_UP(inode->vdi_size, object_size);
3052 uint64_t size = 0;
3053
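/* Only allocated data objects consume space on the cluster. */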
3054 for (i = 0; i < last; i++) {
3055 if (inode->data_vdi_id[i] == 0) {
3056 continue;
3057 }
3058 size += object_size;
3059 }
3060 return size;
3061 }
3062
3063 static QemuOptsList sd_create_opts = {
3064 .name = "sheepdog-create-opts",
3065 .head = QTAILQ_HEAD_INITIALIZER(sd_create_opts.head),
3066 .desc = {
3067 {
3068 .name = BLOCK_OPT_SIZE,
3069 .type = QEMU_OPT_SIZE,
3070 .help = "Virtual disk size"
3071 },
3072 {
3073 .name = BLOCK_OPT_BACKING_FILE,
3074 .type = QEMU_OPT_STRING,
3075 .help = "File name of a base image"
3076 },
3077 {
3078 .name = BLOCK_OPT_PREALLOC,
3079 .type = QEMU_OPT_STRING,
3080 .help = "Preallocation mode (allowed values: off, full)"
3081 },
3082 {
3083 .name = BLOCK_OPT_REDUNDANCY,
3084 .type = QEMU_OPT_STRING,
3085 .help = "Redundancy of the image"
3086 },
3087 {
3088 .name = BLOCK_OPT_OBJECT_SIZE,
3089 .type = QEMU_OPT_SIZE,
3090 .help = "Object size of the image"
3091 },
3092 { /* end of list */ }
3093 }
3094 };
3095
3096 static BlockDriver bdrv_sheepdog = {
3097 .format_name = "sheepdog",
3098 .protocol_name = "sheepdog",
3099 .instance_size = sizeof(BDRVSheepdogState),
3100 .bdrv_parse_filename = sd_parse_filename,
3101 .bdrv_file_open = sd_open,
3102 .bdrv_reopen_prepare = sd_reopen_prepare,
3103 .bdrv_reopen_commit = sd_reopen_commit,
3104 .bdrv_reopen_abort = sd_reopen_abort,
3105 .bdrv_close = sd_close,
3106 .bdrv_co_create_opts = sd_co_create_opts,
3107 .bdrv_has_zero_init = bdrv_has_zero_init_1,
3108 .bdrv_getlength = sd_getlength,
3109 .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
3110 .bdrv_truncate = sd_truncate,
3111
3112 .bdrv_co_readv = sd_co_readv,
3113 .bdrv_co_writev = sd_co_writev,
3114 .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
3115 .bdrv_co_pdiscard = sd_co_pdiscard,
3116 .bdrv_co_block_status = sd_co_block_status,
3117
3118 .bdrv_snapshot_create = sd_snapshot_create,
3119 .bdrv_snapshot_goto = sd_snapshot_goto,
3120 .bdrv_snapshot_delete = sd_snapshot_delete,
3121 .bdrv_snapshot_list = sd_snapshot_list,
3122
3123 .bdrv_save_vmstate = sd_save_vmstate,
3124 .bdrv_load_vmstate = sd_load_vmstate,
3125
3126 .bdrv_detach_aio_context = sd_detach_aio_context,
3127 .bdrv_attach_aio_context = sd_attach_aio_context,
3128
3129 .create_opts = &sd_create_opts,
3130 };
3131
3132 static BlockDriver bdrv_sheepdog_tcp = {
3133 .format_name = "sheepdog",
3134 .protocol_name = "sheepdog+tcp",
3135 .instance_size = sizeof(BDRVSheepdogState),
3136 .bdrv_parse_filename = sd_parse_filename,
3137 .bdrv_file_open = sd_open,
3138 .bdrv_reopen_prepare = sd_reopen_prepare,
3139 .bdrv_reopen_commit = sd_reopen_commit,
3140 .bdrv_reopen_abort = sd_reopen_abort,
3141 .bdrv_close = sd_close,
3142 .bdrv_co_create_opts = sd_co_create_opts,
3143 .bdrv_has_zero_init = bdrv_has_zero_init_1,
3144 .bdrv_getlength = sd_getlength,
3145 .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
3146 .bdrv_truncate = sd_truncate,
3147
3148 .bdrv_co_readv = sd_co_readv,
3149 .bdrv_co_writev = sd_co_writev,
3150 .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
3151 .bdrv_co_pdiscard = sd_co_pdiscard,
3152 .bdrv_co_block_status = sd_co_block_status,
3153
3154 .bdrv_snapshot_create = sd_snapshot_create,
3155 .bdrv_snapshot_goto = sd_snapshot_goto,
3156 .bdrv_snapshot_delete = sd_snapshot_delete,
3157 .bdrv_snapshot_list = sd_snapshot_list,
3158
3159 .bdrv_save_vmstate = sd_save_vmstate,
3160 .bdrv_load_vmstate = sd_load_vmstate,
3161
3162 .bdrv_detach_aio_context = sd_detach_aio_context,
3163 .bdrv_attach_aio_context = sd_attach_aio_context,
3164
3165 .create_opts = &sd_create_opts,
3166 };
3167
3168 static BlockDriver bdrv_sheepdog_unix = {
3169 .format_name = "sheepdog",
3170 .protocol_name = "sheepdog+unix",
3171 .instance_size = sizeof(BDRVSheepdogState),
3172 .bdrv_parse_filename = sd_parse_filename,
3173 .bdrv_file_open = sd_open,
3174 .bdrv_reopen_prepare = sd_reopen_prepare,
3175 .bdrv_reopen_commit = sd_reopen_commit,
3176 .bdrv_reopen_abort = sd_reopen_abort,
3177 .bdrv_close = sd_close,
3178 .bdrv_co_create_opts = sd_co_create_opts,
3179 .bdrv_has_zero_init = bdrv_has_zero_init_1,
3180 .bdrv_getlength = sd_getlength,
3181 .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
3182 .bdrv_truncate = sd_truncate,
3183
3184 .bdrv_co_readv = sd_co_readv,
3185 .bdrv_co_writev = sd_co_writev,
3186 .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
3187 .bdrv_co_pdiscard = sd_co_pdiscard,
3188 .bdrv_co_block_status = sd_co_block_status,
3189
3190 .bdrv_snapshot_create = sd_snapshot_create,
3191 .bdrv_snapshot_goto = sd_snapshot_goto,
3192 .bdrv_snapshot_delete = sd_snapshot_delete,
3193 .bdrv_snapshot_list = sd_snapshot_list,
3194
3195 .bdrv_save_vmstate = sd_save_vmstate,
3196 .bdrv_load_vmstate = sd_load_vmstate,
3197
3198 .bdrv_detach_aio_context = sd_detach_aio_context,
3199 .bdrv_attach_aio_context = sd_attach_aio_context,
3200
3201 .create_opts = &sd_create_opts,
3202 };
3203
3204 static void bdrv_sheepdog_init(void)
3205 {
3206 bdrv_register(&bdrv_sheepdog);
3207 bdrv_register(&bdrv_sheepdog_tcp);
3208 bdrv_register(&bdrv_sheepdog_unix);
3209 }
3210 block_init(bdrv_sheepdog_init);