]> git.proxmox.com Git - mirror_qemu.git/blob - block/sheepdog.c
sheepdog: try to reconnect to sheepdog after network error
[mirror_qemu.git] / block / sheepdog.c
1 /*
2 * Copyright (C) 2009-2010 Nippon Telegraph and Telephone Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * You should have received a copy of the GNU General Public License
9 * along with this program. If not, see <http://www.gnu.org/licenses/>.
10 *
11 * Contributions after 2012-01-13 are licensed under the terms of the
12 * GNU GPL, version 2 or (at your option) any later version.
13 */
14
15 #include "qemu-common.h"
16 #include "qemu/uri.h"
17 #include "qemu/error-report.h"
18 #include "qemu/sockets.h"
19 #include "block/block_int.h"
20 #include "qemu/bitops.h"
21
#define SD_PROTO_VER 0x01

#define SD_DEFAULT_ADDR "localhost"
#define SD_DEFAULT_PORT 7000

/* Opcodes for object (data chunk) access */
#define SD_OP_CREATE_AND_WRITE_OBJ 0x01
#define SD_OP_READ_OBJ 0x02
#define SD_OP_WRITE_OBJ 0x03
/* 0x04 is used internally by Sheepdog */
#define SD_OP_DISCARD_OBJ 0x05

/* Opcodes for vdi (virtual disk image) management */
#define SD_OP_NEW_VDI 0x11
#define SD_OP_LOCK_VDI 0x12
#define SD_OP_RELEASE_VDI 0x13
#define SD_OP_GET_VDI_INFO 0x14
#define SD_OP_READ_VDIS 0x15
#define SD_OP_FLUSH_VDI 0x16
#define SD_OP_DEL_VDI 0x17

/* Request flags (SheepdogReq.flags) */
#define SD_FLAG_CMD_WRITE 0x01
#define SD_FLAG_CMD_COW 0x02
#define SD_FLAG_CMD_CACHE 0x04 /* Writeback mode for cache */
#define SD_FLAG_CMD_DIRECT 0x08 /* Don't use cache */

/* Result codes carried in SheepdogRsp.result */
#define SD_RES_SUCCESS 0x00 /* Success */
#define SD_RES_UNKNOWN 0x01 /* Unknown error */
#define SD_RES_NO_OBJ 0x02 /* No object found */
#define SD_RES_EIO 0x03 /* I/O error */
#define SD_RES_VDI_EXIST 0x04 /* Vdi exists already */
#define SD_RES_INVALID_PARMS 0x05 /* Invalid parameters */
#define SD_RES_SYSTEM_ERROR 0x06 /* System error */
#define SD_RES_VDI_LOCKED 0x07 /* Vdi is locked */
#define SD_RES_NO_VDI 0x08 /* No vdi found */
#define SD_RES_NO_BASE_VDI 0x09 /* No base vdi found */
#define SD_RES_VDI_READ 0x0A /* Cannot read requested vdi */
#define SD_RES_VDI_WRITE 0x0B /* Cannot write requested vdi */
#define SD_RES_BASE_VDI_READ 0x0C /* Cannot read base vdi */
#define SD_RES_BASE_VDI_WRITE 0x0D /* Cannot write base vdi */
#define SD_RES_NO_TAG 0x0E /* Requested tag is not found */
#define SD_RES_STARTUP 0x0F /* Sheepdog is on starting up */
#define SD_RES_VDI_NOT_LOCKED 0x10 /* Vdi is not locked */
#define SD_RES_SHUTDOWN 0x11 /* Sheepdog is shutting down */
#define SD_RES_NO_MEM 0x12 /* Cannot allocate memory */
#define SD_RES_FULL_VDI 0x13 /* we already have the maximum vdis */
#define SD_RES_VER_MISMATCH 0x14 /* Protocol version mismatch */
#define SD_RES_NO_SPACE 0x15 /* Server has no room for new objects */
#define SD_RES_WAIT_FOR_FORMAT 0x16 /* Waiting for a format operation */
#define SD_RES_WAIT_FOR_JOIN 0x17 /* Waiting for other nodes joining */
#define SD_RES_JOIN_FAILED 0x18 /* Target node had failed to join sheepdog */
#define SD_RES_HALT 0x19 /* Sheepdog is stopped serving IO request */
#define SD_RES_READONLY 0x1A /* Object is read-only */

/*
 * Object ID rules
 *
 * 0 - 19 (20 bits): data object space
 * 20 - 31 (12 bits): reserved data object space
 * 32 - 55 (24 bits): vdi object space
 * 56 - 59 ( 4 bits): reserved vdi object space
 * 60 - 63 ( 4 bits): object type identifier space
 */

#define VDI_SPACE_SHIFT 32
#define VDI_BIT (UINT64_C(1) << 63)       /* set for vdi (metadata) objects */
#define VMSTATE_BIT (UINT64_C(1) << 62)   /* set for vmstate objects */
#define MAX_DATA_OBJS (UINT64_C(1) << 20) /* data objects per vdi (power of 2) */
#define MAX_CHILDREN 1024
#define SD_MAX_VDI_LEN 256
#define SD_MAX_VDI_TAG_LEN 256
#define SD_NR_VDIS (1U << 24)
#define SD_DATA_OBJ_SIZE (UINT64_C(1) << 22) /* 4 MB per data object */
#define SD_MAX_VDI_SIZE (SD_DATA_OBJ_SIZE * MAX_DATA_OBJS)

#define SD_INODE_SIZE (sizeof(SheepdogInode))
#define CURRENT_VDI_ID 0
97
/*
 * On-the-wire structures exchanged with the sheep daemon.  They are sent
 * and received verbatim with qemu_co_send()/qemu_co_recv(), so the field
 * layout must match the Sheepdog protocol exactly.  Requests and their
 * responses share the same size; response structs are overlaid on the
 * request buffer (see e.g. find_vdi_name()).
 */

/* Generic request header */
typedef struct SheepdogReq {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;            /* matched against the response id */
    uint32_t data_length;   /* bytes of payload following the header */
    uint32_t opcode_specific[8];
} SheepdogReq;

/* Generic response header */
typedef struct SheepdogRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint32_t result;        /* one of the SD_RES_* codes */
    uint32_t opcode_specific[7];
} SheepdogRsp;

/* Request header for object (data) I/O */
typedef struct SheepdogObjReq {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint64_t oid;           /* target object */
    uint64_t cow_oid;       /* base object for copy-on-write creation */
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[6];
    uint64_t offset;        /* offset within the object */
} SheepdogObjReq;

/* Response header for object I/O */
typedef struct SheepdogObjRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint32_t result;
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[2];
    uint32_t pad[6];
} SheepdogObjRsp;

/* Request header for vdi management operations */
typedef struct SheepdogVdiReq {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint64_t vdi_size;
    uint32_t vdi_id;
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[2];
    uint32_t snapid;
    uint32_t pad[3];
} SheepdogVdiReq;

/* Response header for vdi management operations */
typedef struct SheepdogVdiRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint32_t result;
    uint32_t rsvd;
    uint32_t vdi_id;
    uint32_t pad[5];
} SheepdogVdiRsp;

/* Per-vdi metadata object ("inode"), read from and written to the store */
typedef struct SheepdogInode {
    char name[SD_MAX_VDI_LEN];
    char tag[SD_MAX_VDI_TAG_LEN];
    uint64_t ctime;
    uint64_t snap_ctime;    /* non-zero iff this vdi is a snapshot */
    uint64_t vm_clock_nsec;
    uint64_t vdi_size;
    uint64_t vm_state_size;
    uint16_t copy_policy;
    uint8_t nr_copies;
    uint8_t block_size_shift;
    uint32_t snap_id;
    uint32_t vdi_id;
    uint32_t parent_vdi_id;
    uint32_t child_vdi_id[MAX_CHILDREN];
    /* owning vdi id of each data object; 0 means not allocated, a value
     * different from vdi_id means the object is inherited from a parent
     * and needs copy-on-write before this vdi may write it */
    uint32_t data_vdi_id[MAX_DATA_OBJS];
} SheepdogInode;
194
/*
 * 64 bit FNV-1a non-zero initial basis
 */
#define FNV1A_64_INIT ((uint64_t)0xcbf29ce484222325ULL)

/*
 * 64 bit Fowler/Noll/Vo FNV-1a hash code
 *
 * XOR each input byte into the hash, then multiply by the 64-bit FNV
 * magic prime 0x100000001b3 (mod 2^64).
 */
static inline uint64_t fnv_64a_buf(void *buf, size_t len, uint64_t hval)
{
    const unsigned char *p = buf;
    size_t i;

    for (i = 0; i < len; i++) {
        hval ^= (uint64_t)p[i];
        hval *= UINT64_C(0x100000001b3);
    }
    return hval;
}
214
215 static inline bool is_data_obj_writable(SheepdogInode *inode, unsigned int idx)
216 {
217 return inode->vdi_id == inode->data_vdi_id[idx];
218 }
219
220 static inline bool is_data_obj(uint64_t oid)
221 {
222 return !(VDI_BIT & oid);
223 }
224
225 static inline uint64_t data_oid_to_idx(uint64_t oid)
226 {
227 return oid & (MAX_DATA_OBJS - 1);
228 }
229
230 static inline uint32_t oid_to_vid(uint64_t oid)
231 {
232 return (oid & ~VDI_BIT) >> VDI_SPACE_SHIFT;
233 }
234
235 static inline uint64_t vid_to_vdi_oid(uint32_t vid)
236 {
237 return VDI_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT);
238 }
239
240 static inline uint64_t vid_to_vmstate_oid(uint32_t vid, uint32_t idx)
241 {
242 return VMSTATE_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
243 }
244
245 static inline uint64_t vid_to_data_oid(uint32_t vid, uint32_t idx)
246 {
247 return ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
248 }
249
250 static inline bool is_snapshot(struct SheepdogInode *inode)
251 {
252 return !!inode->snap_ctime;
253 }
254
/* Debug tracing: expands to a stdout printf with function/line prefix when
 * DEBUG_SDOG is defined, and to nothing otherwise. */
#undef DPRINTF
#ifdef DEBUG_SDOG
#define DPRINTF(fmt, args...) \
    do { \
        fprintf(stdout, "%s %d: " fmt, __func__, __LINE__, ##args); \
    } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
264
typedef struct SheepdogAIOCB SheepdogAIOCB;

/*
 * One in-flight request for a single object, carved out of a (possibly
 * larger) guest request represented by a SheepdogAIOCB.
 */
typedef struct AIOReq {
    SheepdogAIOCB *aiocb;       /* the guest request this belongs to */
    unsigned int iov_offset;    /* offset of this chunk in aiocb->qiov */

    uint64_t oid;               /* target object id */
    uint64_t base_oid;          /* copy-on-write source object (cow_oid) */
    uint64_t offset;            /* offset within the object */
    unsigned int data_len;
    uint8_t flags;
    uint32_t id;                /* matched against the response id */

    QLIST_ENTRY(AIOReq) aio_siblings;
} AIOReq;

enum AIOCBState {
    AIOCB_WRITE_UDATA,          /* write guest data */
    AIOCB_READ_UDATA,           /* read guest data */
    AIOCB_FLUSH_CACHE,
    AIOCB_DISCARD_OBJ,
};

struct SheepdogAIOCB {
    BlockDriverAIOCB common;

    QEMUIOVector *qiov;

    int64_t sector_num;
    int nb_sectors;

    int ret;                    /* final result; -EIO on any failure */
    enum AIOCBState aiocb_type;

    Coroutine *coroutine;       /* the sd_co_readv/writev coroutine to resume */
    void (*aio_done_func)(SheepdogAIOCB *);

    bool canceled;              /* set by sd_aio_cancel() */
    int nr_pending;             /* number of outstanding AIOReqs */
};
305
/* Per-BlockDriverState driver state for one open sheepdog image. */
typedef struct BDRVSheepdogState {
    BlockDriverState *bs;

    SheepdogInode inode;        /* cached copy of the vdi metadata object */

    /* range of inode.data_vdi_id[] entries dirtied by newly created
     * objects and not yet written back to the vdi object */
    uint32_t min_dirty_data_idx;
    uint32_t max_dirty_data_idx;

    char name[SD_MAX_VDI_LEN];
    bool is_snapshot;
    uint32_t cache_flags;       /* SD_FLAG_CMD_CACHE / SD_FLAG_CMD_DIRECT */
    bool discard_supported;

    char *host_spec;            /* "host:port" or unix socket path */
    bool is_unix;
    int fd;                     /* long-lived I/O connection to the sheep */

    CoMutex lock;               /* serializes senders on fd */
    Coroutine *co_send;         /* coroutine currently sending on fd */
    Coroutine *co_recv;         /* coroutine currently receiving on fd */

    uint32_t aioreq_seq_num;    /* generator for AIOReq.id */

    /* Every aio request must be linked to either of these queues. */
    QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
    QLIST_HEAD(pending_aio_head, AIOReq) pending_aio_head;
    QLIST_HEAD(failed_aio_head, AIOReq) failed_aio_head;
} BDRVSheepdogState;
334
335 static const char * sd_strerror(int err)
336 {
337 int i;
338
339 static const struct {
340 int err;
341 const char *desc;
342 } errors[] = {
343 {SD_RES_SUCCESS, "Success"},
344 {SD_RES_UNKNOWN, "Unknown error"},
345 {SD_RES_NO_OBJ, "No object found"},
346 {SD_RES_EIO, "I/O error"},
347 {SD_RES_VDI_EXIST, "VDI exists already"},
348 {SD_RES_INVALID_PARMS, "Invalid parameters"},
349 {SD_RES_SYSTEM_ERROR, "System error"},
350 {SD_RES_VDI_LOCKED, "VDI is already locked"},
351 {SD_RES_NO_VDI, "No vdi found"},
352 {SD_RES_NO_BASE_VDI, "No base VDI found"},
353 {SD_RES_VDI_READ, "Failed read the requested VDI"},
354 {SD_RES_VDI_WRITE, "Failed to write the requested VDI"},
355 {SD_RES_BASE_VDI_READ, "Failed to read the base VDI"},
356 {SD_RES_BASE_VDI_WRITE, "Failed to write the base VDI"},
357 {SD_RES_NO_TAG, "Failed to find the requested tag"},
358 {SD_RES_STARTUP, "The system is still booting"},
359 {SD_RES_VDI_NOT_LOCKED, "VDI isn't locked"},
360 {SD_RES_SHUTDOWN, "The system is shutting down"},
361 {SD_RES_NO_MEM, "Out of memory on the server"},
362 {SD_RES_FULL_VDI, "We already have the maximum vdis"},
363 {SD_RES_VER_MISMATCH, "Protocol version mismatch"},
364 {SD_RES_NO_SPACE, "Server has no space for new objects"},
365 {SD_RES_WAIT_FOR_FORMAT, "Sheepdog is waiting for a format operation"},
366 {SD_RES_WAIT_FOR_JOIN, "Sheepdog is waiting for other nodes joining"},
367 {SD_RES_JOIN_FAILED, "Target node had failed to join sheepdog"},
368 {SD_RES_HALT, "Sheepdog is stopped serving IO request"},
369 {SD_RES_READONLY, "Object is read-only"},
370 };
371
372 for (i = 0; i < ARRAY_SIZE(errors); ++i) {
373 if (errors[i].err == err) {
374 return errors[i].desc;
375 }
376 }
377
378 return "Invalid error code";
379 }
380
381 /*
382 * Sheepdog I/O handling:
383 *
384 * 1. In sd_co_rw_vector, we send the I/O requests to the server and
385 * link the requests to the inflight_list in the
386 * BDRVSheepdogState. The function exits without waiting for
387 * receiving the response.
388 *
389 * 2. We receive the response in aio_read_response, the fd handler to
390 * the sheepdog connection. If metadata update is needed, we send
391 * the write request to the vdi object in sd_write_done, the write
392 * completion function. We switch back to sd_co_readv/writev after
393 * all the requests belonging to the AIOCB are finished.
394 */
395
396 static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
397 uint64_t oid, unsigned int data_len,
398 uint64_t offset, uint8_t flags,
399 uint64_t base_oid, unsigned int iov_offset)
400 {
401 AIOReq *aio_req;
402
403 aio_req = g_malloc(sizeof(*aio_req));
404 aio_req->aiocb = acb;
405 aio_req->iov_offset = iov_offset;
406 aio_req->oid = oid;
407 aio_req->base_oid = base_oid;
408 aio_req->offset = offset;
409 aio_req->data_len = data_len;
410 aio_req->flags = flags;
411 aio_req->id = s->aioreq_seq_num++;
412
413 acb->nr_pending++;
414 return aio_req;
415 }
416
417 static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
418 {
419 SheepdogAIOCB *acb = aio_req->aiocb;
420
421 QLIST_REMOVE(aio_req, aio_siblings);
422 g_free(aio_req);
423
424 acb->nr_pending--;
425 }
426
427 static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
428 {
429 if (!acb->canceled) {
430 qemu_coroutine_enter(acb->coroutine, NULL);
431 }
432 qemu_aio_release(acb);
433 }
434
static void sd_aio_cancel(BlockDriverAIOCB *blockacb)
{
    SheepdogAIOCB *acb = (SheepdogAIOCB *)blockacb;

    /*
     * Sheepdog cannot cancel the requests which are already sent to
     * the servers, so we just complete the request with -EIO here.
     */
    acb->ret = -EIO;
    qemu_coroutine_enter(acb->coroutine, NULL);
    /* set after the coroutine has run so that a later sd_finish_aiocb()
     * only releases the AIOCB instead of re-entering the coroutine */
    acb->canceled = true;
}
447
/* AIOCB allocation/cancel hooks for this driver. */
static const AIOCBInfo sd_aiocb_info = {
    .aiocb_size = sizeof(SheepdogAIOCB),
    .cancel = sd_aio_cancel,
};
452
453 static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
454 int64_t sector_num, int nb_sectors)
455 {
456 SheepdogAIOCB *acb;
457
458 acb = qemu_aio_get(&sd_aiocb_info, bs, NULL, NULL);
459
460 acb->qiov = qiov;
461
462 acb->sector_num = sector_num;
463 acb->nb_sectors = nb_sectors;
464
465 acb->aio_done_func = NULL;
466 acb->canceled = false;
467 acb->coroutine = qemu_coroutine_self();
468 acb->ret = 0;
469 acb->nr_pending = 0;
470 return acb;
471 }
472
/*
 * Open a new socket to the sheep daemon described by s->host_spec
 * (unix domain or TCP, depending on s->is_unix).
 *
 * On success the fd is switched to non-blocking mode and returned;
 * on failure the error is reported and the negative fd is returned.
 */
static int connect_to_sdog(BDRVSheepdogState *s)
{
    int fd;
    Error *err = NULL;

    if (s->is_unix) {
        fd = unix_connect(s->host_spec, &err);
    } else {
        fd = inet_connect(s->host_spec, &err);

        if (err == NULL) {
            /* disable Nagle; a nodelay failure is logged but not fatal */
            int ret = socket_set_nodelay(fd);
            if (ret < 0) {
                error_report("%s", strerror(errno));
            }
        }
    }

    if (err != NULL) {
        qerror_report_err(err);
        error_free(err);
    } else {
        qemu_set_nonblock(fd);
    }

    return fd;
}
500
501 static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
502 unsigned int *wlen)
503 {
504 int ret;
505
506 ret = qemu_co_send(sockfd, hdr, sizeof(*hdr));
507 if (ret != sizeof(*hdr)) {
508 error_report("failed to send a req, %s", strerror(errno));
509 return ret;
510 }
511
512 ret = qemu_co_send(sockfd, data, *wlen);
513 if (ret != *wlen) {
514 error_report("failed to send a req, %s", strerror(errno));
515 }
516
517 return ret;
518 }
519
520 static void restart_co_req(void *opaque)
521 {
522 Coroutine *co = opaque;
523
524 qemu_coroutine_enter(co, NULL);
525 }
526
/* Parameter/result bundle passed to do_co_req() when it runs in its own
 * coroutine (see do_req()). */
typedef struct SheepdogReqCo {
    int sockfd;
    SheepdogReq *hdr;       /* request header; overwritten with the response */
    void *data;             /* payload buffer for both send and receive */
    unsigned int *wlen;     /* bytes to send */
    unsigned int *rlen;     /* in: buffer size, out: bytes received */
    int ret;                /* 0 on success, negative errno on failure */
    bool finished;          /* set when the coroutine has completed */
} SheepdogReqCo;
536
537 static coroutine_fn void do_co_req(void *opaque)
538 {
539 int ret;
540 Coroutine *co;
541 SheepdogReqCo *srco = opaque;
542 int sockfd = srco->sockfd;
543 SheepdogReq *hdr = srco->hdr;
544 void *data = srco->data;
545 unsigned int *wlen = srco->wlen;
546 unsigned int *rlen = srco->rlen;
547
548 co = qemu_coroutine_self();
549 qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
550
551 ret = send_co_req(sockfd, hdr, data, wlen);
552 if (ret < 0) {
553 goto out;
554 }
555
556 qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
557
558 ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
559 if (ret != sizeof(*hdr)) {
560 error_report("failed to get a rsp, %s", strerror(errno));
561 ret = -errno;
562 goto out;
563 }
564
565 if (*rlen > hdr->data_length) {
566 *rlen = hdr->data_length;
567 }
568
569 if (*rlen) {
570 ret = qemu_co_recv(sockfd, data, *rlen);
571 if (ret != *rlen) {
572 error_report("failed to get the data, %s", strerror(errno));
573 ret = -errno;
574 goto out;
575 }
576 }
577 ret = 0;
578 out:
579 /* there is at most one request for this sockfd, so it is safe to
580 * set each handler to NULL. */
581 qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
582
583 srco->ret = ret;
584 srco->finished = true;
585 }
586
/*
 * Synchronous request/response wrapper around do_co_req().
 *
 * If already in coroutine context the work runs inline; otherwise a
 * coroutine is created and the main loop is pumped (qemu_aio_wait)
 * until it finishes.  Returns 0 on success or a negative errno.
 */
static int do_req(int sockfd, SheepdogReq *hdr, void *data,
                  unsigned int *wlen, unsigned int *rlen)
{
    Coroutine *co;
    SheepdogReqCo srco = {
        .sockfd = sockfd,
        .hdr = hdr,
        .data = data,
        .wlen = wlen,
        .rlen = rlen,
        .ret = 0,
        .finished = false,
    };

    if (qemu_in_coroutine()) {
        do_co_req(&srco);
    } else {
        co = qemu_coroutine_create(do_co_req);
        qemu_coroutine_enter(co, &srco);
        while (!srco.finished) {
            qemu_aio_wait();
        }
    }

    return srco.ret;
}
613
614 static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
615 struct iovec *iov, int niov, bool create,
616 enum AIOCBState aiocb_type);
617 static int coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req);
618 static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag);
619 static int get_sheep_fd(BDRVSheepdogState *s);
620 static void co_write_request(void *opaque);
621
622 static AIOReq *find_pending_req(BDRVSheepdogState *s, uint64_t oid)
623 {
624 AIOReq *aio_req;
625
626 QLIST_FOREACH(aio_req, &s->pending_aio_head, aio_siblings) {
627 if (aio_req->oid == oid) {
628 return aio_req;
629 }
630 }
631
632 return NULL;
633 }
634
/*
 * This function searches pending requests to the object `oid', and
 * sends them.
 */
static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid)
{
    AIOReq *aio_req;
    SheepdogAIOCB *acb;
    int ret;

    while ((aio_req = find_pending_req(s, oid)) != NULL) {
        acb = aio_req->aiocb;
        /* move aio_req from pending list to inflight one */
        QLIST_REMOVE(aio_req, aio_siblings);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        /* `create' is false: the object exists now that the blocking
         * create request has completed */
        ret = add_aio_request(s, aio_req, acb->qiov->iov,
                              acb->qiov->niov, false, acb->aiocb_type);
        if (ret < 0) {
            error_report("add_aio_request is failed");
            free_aio_req(s, aio_req);
            if (!acb->nr_pending) {
                /* last request of the AIOCB failed: complete it with the
                 * error already recorded in acb->ret */
                sd_finish_aiocb(acb);
            }
        }
    }
}
661
/*
 * Tear down the broken connection to the sheep daemon, reconnect
 * (retrying once per second until it succeeds), and resend every
 * request that was in flight when the connection died.
 */
static coroutine_fn void reconnect_to_sdog(void *opaque)
{
    BDRVSheepdogState *s = opaque;
    AIOReq *aio_req, *next;

    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
    close(s->fd);
    s->fd = -1;

    /* Wait for outstanding write requests to be completed. */
    while (s->co_send != NULL) {
        co_write_request(opaque);
    }

    /* Try to reconnect the sheepdog server every one second. */
    while (s->fd < 0) {
        s->fd = get_sheep_fd(s);
        if (s->fd < 0) {
            DPRINTF("Wait for connection to be established\n");
            co_aio_sleep_ns(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME,
                            1000000000ULL);
        }
    };

    /*
     * Now we have to resend all the request in the inflight queue. However,
     * resend_aioreq() can yield and newly created requests can be added to the
     * inflight queue before the coroutine is resumed. To avoid mixing them, we
     * have to move all the inflight requests to the failed queue before
     * resend_aioreq() is called.
     */
    QLIST_FOREACH_SAFE(aio_req, &s->inflight_aio_head, aio_siblings, next) {
        QLIST_REMOVE(aio_req, aio_siblings);
        QLIST_INSERT_HEAD(&s->failed_aio_head, aio_req, aio_siblings);
    }

    /* Resend all the failed aio requests. */
    while (!QLIST_EMPTY(&s->failed_aio_head)) {
        aio_req = QLIST_FIRST(&s->failed_aio_head);
        QLIST_REMOVE(aio_req, aio_siblings);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        resend_aioreq(s, aio_req);
    }
}
706
/*
 * Receive responses of the I/O requests.
 *
 * This function is registered as a fd handler, and called from the
 * main loop when s->fd is ready for reading responses.
 *
 * Any network error takes the `err' path, which hands recovery over to
 * reconnect_to_sdog() (close, reconnect, resend all inflight requests).
 */
static void coroutine_fn aio_read_response(void *opaque)
{
    SheepdogObjRsp rsp;
    BDRVSheepdogState *s = opaque;
    int fd = s->fd;
    int ret;
    AIOReq *aio_req = NULL;
    SheepdogAIOCB *acb;
    uint64_t idx;

    /* read a header */
    ret = qemu_co_recv(fd, &rsp, sizeof(rsp));
    if (ret != sizeof(rsp)) {
        error_report("failed to get the header, %s", strerror(errno));
        goto err;
    }

    /* find the right aio_req from the inflight aio list */
    QLIST_FOREACH(aio_req, &s->inflight_aio_head, aio_siblings) {
        if (aio_req->id == rsp.id) {
            break;
        }
    }
    if (!aio_req) {
        error_report("cannot find aio_req %x", rsp.id);
        goto err;
    }

    acb = aio_req->aiocb;

    switch (acb->aiocb_type) {
    case AIOCB_WRITE_UDATA:
        /* this coroutine context is no longer suitable for co_recv
         * because we may send data to update vdi objects */
        s->co_recv = NULL;
        if (!is_data_obj(aio_req->oid)) {
            break;
        }
        idx = data_oid_to_idx(aio_req->oid);

        if (s->inode.data_vdi_id[idx] != s->inode.vdi_id) {
            /*
             * If the object is newly created one, we need to update
             * the vdi object (metadata object). min_dirty_data_idx
             * and max_dirty_data_idx are changed to include updated
             * index between them.
             */
            if (rsp.result == SD_RES_SUCCESS) {
                s->inode.data_vdi_id[idx] = s->inode.vdi_id;
                s->max_dirty_data_idx = MAX(idx, s->max_dirty_data_idx);
                s->min_dirty_data_idx = MIN(idx, s->min_dirty_data_idx);
            }
            /*
             * Some requests may be blocked because simultaneous
             * create requests are not allowed, so we search the
             * pending requests here.
             */
            send_pending_req(s, aio_req->oid);
        }
        break;
    case AIOCB_READ_UDATA:
        /* the read payload follows the response header on the socket;
         * scatter it straight into the guest's iovec */
        ret = qemu_co_recvv(fd, acb->qiov->iov, acb->qiov->niov,
                            aio_req->iov_offset, rsp.data_length);
        if (ret != rsp.data_length) {
            error_report("failed to get the data, %s", strerror(errno));
            goto err;
        }
        break;
    case AIOCB_FLUSH_CACHE:
        if (rsp.result == SD_RES_INVALID_PARMS) {
            DPRINTF("disable cache since the server doesn't support it\n");
            s->cache_flags = SD_FLAG_CMD_DIRECT;
            /* treat as success: with the cache disabled there is
             * nothing left to flush */
            rsp.result = SD_RES_SUCCESS;
        }
        break;
    case AIOCB_DISCARD_OBJ:
        switch (rsp.result) {
        case SD_RES_INVALID_PARMS:
            error_report("sheep(%s) doesn't support discard command",
                         s->host_spec);
            rsp.result = SD_RES_SUCCESS;
            s->discard_supported = false;
            break;
        case SD_RES_SUCCESS:
            /* the object is gone; mark its slot unallocated */
            idx = data_oid_to_idx(aio_req->oid);
            s->inode.data_vdi_id[idx] = 0;
            break;
        default:
            break;
        }
    }

    switch (rsp.result) {
    case SD_RES_SUCCESS:
        break;
    case SD_RES_READONLY:
        /* the vdi was snapshotted behind our back: reload the inode and
         * redirect the request to the new working vdi */
        if (s->inode.vdi_id == oid_to_vid(aio_req->oid)) {
            ret = reload_inode(s, 0, "");
            if (ret < 0) {
                goto err;
            }
        }
        if (is_data_obj(aio_req->oid)) {
            aio_req->oid = vid_to_data_oid(s->inode.vdi_id,
                                           data_oid_to_idx(aio_req->oid));
        } else {
            aio_req->oid = vid_to_vdi_oid(s->inode.vdi_id);
        }
        ret = resend_aioreq(s, aio_req);
        if (ret == SD_RES_SUCCESS) {
            goto out;
        }
        /* fall through */
    default:
        acb->ret = -EIO;
        error_report("%s", sd_strerror(rsp.result));
        break;
    }

    free_aio_req(s, aio_req);
    if (!acb->nr_pending) {
        /*
         * We've finished all requests which belong to the AIOCB, so
         * we can switch back to sd_co_readv/writev now.
         */
        acb->aio_done_func(acb);
    }
out:
    s->co_recv = NULL;
    return;
err:
    s->co_recv = NULL;
    reconnect_to_sdog(opaque);
}
847
848 static void co_read_response(void *opaque)
849 {
850 BDRVSheepdogState *s = opaque;
851
852 if (!s->co_recv) {
853 s->co_recv = qemu_coroutine_create(aio_read_response);
854 }
855
856 qemu_coroutine_enter(s->co_recv, opaque);
857 }
858
/* Write-ready fd handler: resume the coroutine blocked while sending in
 * add_aio_request(). */
static void co_write_request(void *opaque)
{
    BDRVSheepdogState *s = opaque;

    qemu_coroutine_enter(s->co_send, NULL);
}
865
866 /*
867 * Return a socket discriptor to read/write objects.
868 *
869 * We cannot use this discriptor for other operations because
870 * the block driver may be on waiting response from the server.
871 */
872 static int get_sheep_fd(BDRVSheepdogState *s)
873 {
874 int fd;
875
876 fd = connect_to_sdog(s);
877 if (fd < 0) {
878 return fd;
879 }
880
881 qemu_aio_set_fd_handler(fd, co_read_response, NULL, s);
882 return fd;
883 }
884
/*
 * Parse a sheepdog URI of the form
 *   sheepdog[+tcp]://[host:port]/vdiname[#snapid-or-tag]
 *   sheepdog+unix:///vdiname?socket=path[#snapid-or-tag]
 * filling in s->is_unix, s->host_spec, `vdi', `snapid' and `tag'.
 *
 * Returns 0 on success, -EINVAL on any malformed input.
 */
static int sd_parse_uri(BDRVSheepdogState *s, const char *filename,
                        char *vdi, uint32_t *snapid, char *tag)
{
    URI *uri;
    QueryParams *qp = NULL;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "sheepdog")) {
        s->is_unix = false;
    } else if (!strcmp(uri->scheme, "sheepdog+tcp")) {
        s->is_unix = false;
    } else if (!strcmp(uri->scheme, "sheepdog+unix")) {
        s->is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    /* the vdi name is the URI path without the leading '/' */
    if (uri->path == NULL || !strcmp(uri->path, "/")) {
        ret = -EINVAL;
        goto out;
    }
    pstrcpy(vdi, SD_MAX_VDI_LEN, uri->path + 1);

    /* a query string is required for unix (exactly ?socket=...) and
     * forbidden for tcp */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (s->is_unix && !qp->n) || (!s->is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (s->is_unix) {
        /* sheepdog+unix:///vdiname?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        s->host_spec = g_strdup(qp->p[0].value);
    } else {
        /* sheepdog[+tcp]://[host:port]/vdiname */
        s->host_spec = g_strdup_printf("%s:%d", uri->server ?: SD_DEFAULT_ADDR,
                                       uri->port ?: SD_DEFAULT_PORT);
    }

    /* snapshot tag: a numeric fragment is a snapshot id, anything else
     * (strtoul yields 0) is treated as a tag name */
    if (uri->fragment) {
        *snapid = strtoul(uri->fragment, NULL, 10);
        if (*snapid == 0) {
            pstrcpy(tag, SD_MAX_VDI_TAG_LEN, uri->fragment);
        }
    } else {
        *snapid = CURRENT_VDI_ID; /* search current vdi */
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
951
/*
 * Parse a filename (old syntax)
 *
 * filename must be one of the following formats:
 *   1. [vdiname]
 *   2. [vdiname]:[snapid]
 *   3. [vdiname]:[tag]
 *   4. [hostname]:[port]:[vdiname]
 *   5. [hostname]:[port]:[vdiname]:[snapid]
 *   6. [hostname]:[port]:[vdiname]:[tag]
 *
 * You can boot from the snapshot images by specifying `snapid' or
 * `tag'.
 *
 * You can run VMs outside the Sheepdog cluster by specifying
 * `hostname' and `port' (experimental).
 *
 * Implemented by rewriting the old syntax into a sheepdog:// URI and
 * delegating to sd_parse_uri().
 */
static int parse_vdiname(BDRVSheepdogState *s, const char *filename,
                         char *vdi, uint32_t *snapid, char *tag)
{
    char *p, *q, *uri;
    const char *host_spec, *vdi_spec;
    int nr_sep, ret;

    /* drop an optional "sheepdog:" prefix */
    strstart(filename, "sheepdog:", (const char **)&filename);
    p = q = g_strdup(filename);

    /* count the number of separators */
    nr_sep = 0;
    while (*p) {
        if (*p == ':') {
            nr_sep++;
        }
        p++;
    }
    p = q;

    /* use the first two tokens as host_spec. */
    if (nr_sep >= 2) {
        host_spec = p;
        p = strchr(p, ':');
        p++;
        p = strchr(p, ':');
        *p++ = '\0';
    } else {
        host_spec = "";
    }

    vdi_spec = p;

    /* a trailing ":snapid-or-tag" becomes the URI fragment */
    p = strchr(vdi_spec, ':');
    if (p) {
        *p++ = '#';
    }

    uri = g_strdup_printf("sheepdog://%s/%s", host_spec, vdi_spec);

    ret = sd_parse_uri(s, uri, vdi, snapid, tag);

    g_free(q);
    g_free(uri);

    return ret;
}
1016
/*
 * Look up (and optionally lock) the vdi named `filename' with the given
 * snapshot id/tag on a fresh connection, storing its vdi id in *vid.
 *
 * Returns 0 on success, -ENOENT if no such vdi exists, negative errno
 * otherwise.
 */
static int find_vdi_name(BDRVSheepdogState *s, const char *filename,
                         uint32_t snapid, const char *tag, uint32_t *vid,
                         bool lock)
{
    int ret, fd;
    SheepdogVdiReq hdr;
    SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
    unsigned int wlen, rlen = 0;
    char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN];

    fd = connect_to_sdog(s);
    if (fd < 0) {
        return fd;
    }

    /* This pair of strncpy calls ensures that the buffer is zero-filled,
     * which is desirable since we'll soon be sending those bytes, and
     * don't want the send_req to read uninitialized data.
     * (The fields are fixed-width protocol fields, so the usual strncpy
     * non-termination caveat does not apply here.)
     */
    strncpy(buf, filename, SD_MAX_VDI_LEN);
    strncpy(buf + SD_MAX_VDI_LEN, tag, SD_MAX_VDI_TAG_LEN);

    memset(&hdr, 0, sizeof(hdr));
    if (lock) {
        hdr.opcode = SD_OP_LOCK_VDI;
    } else {
        hdr.opcode = SD_OP_GET_VDI_INFO;
    }
    wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN;
    hdr.proto_ver = SD_PROTO_VER;
    hdr.data_length = wlen;
    hdr.snapid = snapid;
    hdr.flags = SD_FLAG_CMD_WRITE;

    ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
    if (ret) {
        goto out;
    }

    if (rsp->result != SD_RES_SUCCESS) {
        error_report("cannot get vdi info, %s, %s %d %s",
                     sd_strerror(rsp->result), filename, snapid, tag);
        if (rsp->result == SD_RES_NO_VDI) {
            ret = -ENOENT;
        } else {
            ret = -EIO;
        }
        goto out;
    }
    *vid = rsp->vdi_id;

    ret = 0;
out:
    closesocket(fd);
    return ret;
}
1073
/*
 * Send one asynchronous object request (header plus, for writes, the
 * payload from `iov') on the long-lived connection s->fd.
 *
 * The send side of the socket is serialized with s->lock; s->co_send is
 * published so the write-ready fd handler (co_write_request) can resume
 * this coroutine when the socket would block.
 *
 * Always returns 0.  NOTE(review): send failures are only logged here;
 * recovery appears to rely on the response read failing and triggering
 * reconnect_to_sdog() — confirm against the reconnect design.
 */
static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
                                        struct iovec *iov, int niov, bool create,
                                        enum AIOCBState aiocb_type)
{
    int nr_copies = s->inode.nr_copies;
    SheepdogObjReq hdr;
    unsigned int wlen = 0;
    int ret;
    uint64_t oid = aio_req->oid;
    unsigned int datalen = aio_req->data_len;
    uint64_t offset = aio_req->offset;
    uint8_t flags = aio_req->flags;
    uint64_t old_oid = aio_req->base_oid;

    if (!nr_copies) {
        error_report("bug");
    }

    memset(&hdr, 0, sizeof(hdr));

    switch (aiocb_type) {
    case AIOCB_FLUSH_CACHE:
        hdr.opcode = SD_OP_FLUSH_VDI;
        break;
    case AIOCB_READ_UDATA:
        hdr.opcode = SD_OP_READ_OBJ;
        hdr.flags = flags;
        break;
    case AIOCB_WRITE_UDATA:
        if (create) {
            hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
        } else {
            hdr.opcode = SD_OP_WRITE_OBJ;
        }
        wlen = datalen;
        hdr.flags = SD_FLAG_CMD_WRITE | flags;
        break;
    case AIOCB_DISCARD_OBJ:
        hdr.opcode = SD_OP_DISCARD_OBJ;
        break;
    }

    if (s->cache_flags) {
        hdr.flags |= s->cache_flags;
    }

    hdr.oid = oid;
    hdr.cow_oid = old_oid;
    hdr.copies = s->inode.nr_copies;

    hdr.data_length = datalen;
    hdr.offset = offset;

    hdr.id = aio_req->id;

    qemu_co_mutex_lock(&s->lock);
    s->co_send = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s);
    /* cork so header and payload leave in as few packets as possible */
    socket_set_cork(s->fd, 1);

    /* send a header */
    ret = qemu_co_send(s->fd, &hdr, sizeof(hdr));
    if (ret != sizeof(hdr)) {
        error_report("failed to send a req, %s", strerror(errno));
        goto out;
    }

    if (wlen) {
        ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
        if (ret != wlen) {
            error_report("failed to send a data, %s", strerror(errno));
        }
    }
out:
    socket_set_cork(s->fd, 0);
    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s);
    s->co_send = NULL;
    qemu_co_mutex_unlock(&s->lock);

    return 0;
}
1155
1156 static int read_write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
1157 unsigned int datalen, uint64_t offset,
1158 bool write, bool create, uint32_t cache_flags)
1159 {
1160 SheepdogObjReq hdr;
1161 SheepdogObjRsp *rsp = (SheepdogObjRsp *)&hdr;
1162 unsigned int wlen, rlen;
1163 int ret;
1164
1165 memset(&hdr, 0, sizeof(hdr));
1166
1167 if (write) {
1168 wlen = datalen;
1169 rlen = 0;
1170 hdr.flags = SD_FLAG_CMD_WRITE;
1171 if (create) {
1172 hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
1173 } else {
1174 hdr.opcode = SD_OP_WRITE_OBJ;
1175 }
1176 } else {
1177 wlen = 0;
1178 rlen = datalen;
1179 hdr.opcode = SD_OP_READ_OBJ;
1180 }
1181
1182 hdr.flags |= cache_flags;
1183
1184 hdr.oid = oid;
1185 hdr.data_length = datalen;
1186 hdr.offset = offset;
1187 hdr.copies = copies;
1188
1189 ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
1190 if (ret) {
1191 error_report("failed to send a request to the sheep");
1192 return ret;
1193 }
1194
1195 switch (rsp->result) {
1196 case SD_RES_SUCCESS:
1197 return 0;
1198 default:
1199 error_report("%s", sd_strerror(rsp->result));
1200 return -EIO;
1201 }
1202 }
1203
1204 static int read_object(int fd, char *buf, uint64_t oid, uint8_t copies,
1205 unsigned int datalen, uint64_t offset,
1206 uint32_t cache_flags)
1207 {
1208 return read_write_object(fd, buf, oid, copies, datalen, offset, false,
1209 false, cache_flags);
1210 }
1211
/* Convenience wrapper: write @datalen bytes at @offset to object @oid,
 * creating the object first when @create is true. */
static int write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
                        unsigned int datalen, uint64_t offset, bool create,
                        uint32_t cache_flags)
{
    return read_write_object(fd, buf, oid, copies, datalen, offset,
                             true, create, cache_flags);
}
1219
/* update inode with the latest state */
static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag)
{
    SheepdogInode *inode;
    int ret = 0, fd;
    uint32_t vid = 0;

    /* use a fresh connection so this works even if s->fd is unusable */
    fd = connect_to_sdog(s);
    if (fd < 0) {
        return -EIO;
    }

    inode = g_malloc(sizeof(s->inode));

    /* resolve (name, snapid, tag) to the current vdi id */
    ret = find_vdi_name(s, s->name, snapid, tag, &vid, false);
    if (ret) {
        goto out;
    }

    ret = read_object(fd, (char *)inode, vid_to_vdi_oid(vid),
                      s->inode.nr_copies, sizeof(*inode), 0, s->cache_flags);
    if (ret < 0) {
        goto out;
    }

    /* replace the cached inode only when the vdi id actually changed */
    if (inode->vdi_id != s->inode.vdi_id) {
        memcpy(&s->inode, inode, sizeof(s->inode));
    }

out:
    g_free(inode);
    closesocket(fd);

    return ret;
}
1255
/*
 * Re-send @aio_req, e.g. after reconnecting to the sheep.  A write that
 * raced with a snapshot may have to be turned into a create/CoW request.
 */
static int coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req)
{
    SheepdogAIOCB *acb = aio_req->aiocb;
    bool create = false;

    /* check whether this request becomes a CoW one */
    if (acb->aiocb_type == AIOCB_WRITE_UDATA && is_data_obj(aio_req->oid)) {
        int idx = data_oid_to_idx(aio_req->oid);
        AIOReq *areq;

        if (s->inode.data_vdi_id[idx] == 0) {
            /* the object is not allocated yet: retry as a create */
            create = true;
            goto out;
        }
        if (is_data_obj_writable(&s->inode, idx)) {
            /* the object already belongs to us: plain rewrite is fine */
            goto out;
        }

        /* link to the pending list if there is another CoW request to
         * the same object */
        QLIST_FOREACH(areq, &s->inflight_aio_head, aio_siblings) {
            if (areq != aio_req && areq->oid == aio_req->oid) {
                DPRINTF("simultaneous CoW to %" PRIx64 "\n", aio_req->oid);
                QLIST_REMOVE(aio_req, aio_siblings);
                QLIST_INSERT_HEAD(&s->pending_aio_head, aio_req, aio_siblings);
                return SD_RES_SUCCESS;
            }
        }

        /* copy from the base object owned by the parent vdi */
        aio_req->base_oid = vid_to_data_oid(s->inode.data_vdi_id[idx], idx);
        aio_req->flags |= SD_FLAG_CMD_COW;
        create = true;
    }
out:
    if (is_data_obj(aio_req->oid)) {
        return add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
                               create, acb->aiocb_type);
    } else {
        /* vdi object update: resend the cached inode, not user data */
        struct iovec iov;
        iov.iov_base = &s->inode;
        iov.iov_len = sizeof(s->inode);
        return add_aio_request(s, aio_req, &iov, 1, false, AIOCB_WRITE_UDATA);
    }
}
1300
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "sheepdog",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            /* the whole sheepdog URL/vdiname string; parsed in sd_open() */
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the sheepdog image",
        },
        { /* end of list */ }
    },
};
1314
1315 static int sd_open(BlockDriverState *bs, QDict *options, int flags,
1316 Error **errp)
1317 {
1318 int ret, fd;
1319 uint32_t vid = 0;
1320 BDRVSheepdogState *s = bs->opaque;
1321 char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN];
1322 uint32_t snapid;
1323 char *buf = NULL;
1324 QemuOpts *opts;
1325 Error *local_err = NULL;
1326 const char *filename;
1327
1328 s->bs = bs;
1329
1330 opts = qemu_opts_create_nofail(&runtime_opts);
1331 qemu_opts_absorb_qdict(opts, options, &local_err);
1332 if (error_is_set(&local_err)) {
1333 qerror_report_err(local_err);
1334 error_free(local_err);
1335 ret = -EINVAL;
1336 goto out;
1337 }
1338
1339 filename = qemu_opt_get(opts, "filename");
1340
1341 QLIST_INIT(&s->inflight_aio_head);
1342 QLIST_INIT(&s->pending_aio_head);
1343 QLIST_INIT(&s->failed_aio_head);
1344 s->fd = -1;
1345
1346 memset(vdi, 0, sizeof(vdi));
1347 memset(tag, 0, sizeof(tag));
1348
1349 if (strstr(filename, "://")) {
1350 ret = sd_parse_uri(s, filename, vdi, &snapid, tag);
1351 } else {
1352 ret = parse_vdiname(s, filename, vdi, &snapid, tag);
1353 }
1354 if (ret < 0) {
1355 goto out;
1356 }
1357 s->fd = get_sheep_fd(s);
1358 if (s->fd < 0) {
1359 ret = s->fd;
1360 goto out;
1361 }
1362
1363 ret = find_vdi_name(s, vdi, snapid, tag, &vid, true);
1364 if (ret) {
1365 goto out;
1366 }
1367
1368 /*
1369 * QEMU block layer emulates writethrough cache as 'writeback + flush', so
1370 * we always set SD_FLAG_CMD_CACHE (writeback cache) as default.
1371 */
1372 s->cache_flags = SD_FLAG_CMD_CACHE;
1373 if (flags & BDRV_O_NOCACHE) {
1374 s->cache_flags = SD_FLAG_CMD_DIRECT;
1375 }
1376 s->discard_supported = true;
1377
1378 if (snapid || tag[0] != '\0') {
1379 DPRINTF("%" PRIx32 " snapshot inode was open.\n", vid);
1380 s->is_snapshot = true;
1381 }
1382
1383 fd = connect_to_sdog(s);
1384 if (fd < 0) {
1385 ret = fd;
1386 goto out;
1387 }
1388
1389 buf = g_malloc(SD_INODE_SIZE);
1390 ret = read_object(fd, buf, vid_to_vdi_oid(vid), 0, SD_INODE_SIZE, 0,
1391 s->cache_flags);
1392
1393 closesocket(fd);
1394
1395 if (ret) {
1396 goto out;
1397 }
1398
1399 memcpy(&s->inode, buf, sizeof(s->inode));
1400 s->min_dirty_data_idx = UINT32_MAX;
1401 s->max_dirty_data_idx = 0;
1402
1403 bs->total_sectors = s->inode.vdi_size / BDRV_SECTOR_SIZE;
1404 pstrcpy(s->name, sizeof(s->name), vdi);
1405 qemu_co_mutex_init(&s->lock);
1406 qemu_opts_del(opts);
1407 g_free(buf);
1408 return 0;
1409 out:
1410 qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
1411 if (s->fd >= 0) {
1412 closesocket(s->fd);
1413 }
1414 qemu_opts_del(opts);
1415 g_free(buf);
1416 return ret;
1417 }
1418
1419 static int do_sd_create(BDRVSheepdogState *s, char *filename, int64_t vdi_size,
1420 uint32_t base_vid, uint32_t *vdi_id, int snapshot,
1421 uint8_t copy_policy)
1422 {
1423 SheepdogVdiReq hdr;
1424 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
1425 int fd, ret;
1426 unsigned int wlen, rlen = 0;
1427 char buf[SD_MAX_VDI_LEN];
1428
1429 fd = connect_to_sdog(s);
1430 if (fd < 0) {
1431 return fd;
1432 }
1433
1434 /* FIXME: would it be better to fail (e.g., return -EIO) when filename
1435 * does not fit in buf? For now, just truncate and avoid buffer overrun.
1436 */
1437 memset(buf, 0, sizeof(buf));
1438 pstrcpy(buf, sizeof(buf), filename);
1439
1440 memset(&hdr, 0, sizeof(hdr));
1441 hdr.opcode = SD_OP_NEW_VDI;
1442 hdr.vdi_id = base_vid;
1443
1444 wlen = SD_MAX_VDI_LEN;
1445
1446 hdr.flags = SD_FLAG_CMD_WRITE;
1447 hdr.snapid = snapshot;
1448
1449 hdr.data_length = wlen;
1450 hdr.vdi_size = vdi_size;
1451 hdr.copy_policy = copy_policy;
1452
1453 ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
1454
1455 closesocket(fd);
1456
1457 if (ret) {
1458 return ret;
1459 }
1460
1461 if (rsp->result != SD_RES_SUCCESS) {
1462 error_report("%s, %s", sd_strerror(rsp->result), filename);
1463 return -EIO;
1464 }
1465
1466 if (vdi_id) {
1467 *vdi_id = rsp->vdi_id;
1468 }
1469
1470 return 0;
1471 }
1472
1473 static int sd_prealloc(const char *filename)
1474 {
1475 BlockDriverState *bs = NULL;
1476 uint32_t idx, max_idx;
1477 int64_t vdi_size;
1478 void *buf = g_malloc0(SD_DATA_OBJ_SIZE);
1479 Error *local_err = NULL;
1480 int ret;
1481
1482 ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR, &local_err);
1483 if (ret < 0) {
1484 qerror_report_err(local_err);
1485 error_free(local_err);
1486 goto out;
1487 }
1488
1489 vdi_size = bdrv_getlength(bs);
1490 if (vdi_size < 0) {
1491 ret = vdi_size;
1492 goto out;
1493 }
1494 max_idx = DIV_ROUND_UP(vdi_size, SD_DATA_OBJ_SIZE);
1495
1496 for (idx = 0; idx < max_idx; idx++) {
1497 /*
1498 * The created image can be a cloned image, so we need to read
1499 * a data from the source image.
1500 */
1501 ret = bdrv_pread(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE);
1502 if (ret < 0) {
1503 goto out;
1504 }
1505 ret = bdrv_pwrite(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE);
1506 if (ret < 0) {
1507 goto out;
1508 }
1509 }
1510 out:
1511 if (bs) {
1512 bdrv_unref(bs);
1513 }
1514 g_free(buf);
1515
1516 return ret;
1517 }
1518
1519 static int sd_create(const char *filename, QEMUOptionParameter *options,
1520 Error **errp)
1521 {
1522 int ret = 0;
1523 uint32_t vid = 0, base_vid = 0;
1524 int64_t vdi_size = 0;
1525 char *backing_file = NULL;
1526 BDRVSheepdogState *s;
1527 char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN];
1528 uint32_t snapid;
1529 bool prealloc = false;
1530 Error *local_err = NULL;
1531
1532 s = g_malloc0(sizeof(BDRVSheepdogState));
1533
1534 memset(vdi, 0, sizeof(vdi));
1535 memset(tag, 0, sizeof(tag));
1536 if (strstr(filename, "://")) {
1537 ret = sd_parse_uri(s, filename, vdi, &snapid, tag);
1538 } else {
1539 ret = parse_vdiname(s, filename, vdi, &snapid, tag);
1540 }
1541 if (ret < 0) {
1542 goto out;
1543 }
1544
1545 while (options && options->name) {
1546 if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
1547 vdi_size = options->value.n;
1548 } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
1549 backing_file = options->value.s;
1550 } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
1551 if (!options->value.s || !strcmp(options->value.s, "off")) {
1552 prealloc = false;
1553 } else if (!strcmp(options->value.s, "full")) {
1554 prealloc = true;
1555 } else {
1556 error_report("Invalid preallocation mode: '%s'",
1557 options->value.s);
1558 ret = -EINVAL;
1559 goto out;
1560 }
1561 }
1562 options++;
1563 }
1564
1565 if (vdi_size > SD_MAX_VDI_SIZE) {
1566 error_report("too big image size");
1567 ret = -EINVAL;
1568 goto out;
1569 }
1570
1571 if (backing_file) {
1572 BlockDriverState *bs;
1573 BDRVSheepdogState *s;
1574 BlockDriver *drv;
1575
1576 /* Currently, only Sheepdog backing image is supported. */
1577 drv = bdrv_find_protocol(backing_file, true);
1578 if (!drv || strcmp(drv->protocol_name, "sheepdog") != 0) {
1579 error_report("backing_file must be a sheepdog image");
1580 ret = -EINVAL;
1581 goto out;
1582 }
1583
1584 ret = bdrv_file_open(&bs, backing_file, NULL, 0, &local_err);
1585 if (ret < 0) {
1586 qerror_report_err(local_err);
1587 error_free(local_err);
1588 goto out;
1589 }
1590
1591 s = bs->opaque;
1592
1593 if (!is_snapshot(&s->inode)) {
1594 error_report("cannot clone from a non snapshot vdi");
1595 bdrv_unref(bs);
1596 ret = -EINVAL;
1597 goto out;
1598 }
1599
1600 base_vid = s->inode.vdi_id;
1601 bdrv_unref(bs);
1602 }
1603
1604 /* TODO: allow users to specify copy number */
1605 ret = do_sd_create(s, vdi, vdi_size, base_vid, &vid, 0, 0);
1606 if (!prealloc || ret) {
1607 goto out;
1608 }
1609
1610 ret = sd_prealloc(filename);
1611 out:
1612 g_free(s);
1613 return ret;
1614 }
1615
1616 static void sd_close(BlockDriverState *bs)
1617 {
1618 BDRVSheepdogState *s = bs->opaque;
1619 SheepdogVdiReq hdr;
1620 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
1621 unsigned int wlen, rlen = 0;
1622 int fd, ret;
1623
1624 DPRINTF("%s\n", s->name);
1625
1626 fd = connect_to_sdog(s);
1627 if (fd < 0) {
1628 return;
1629 }
1630
1631 memset(&hdr, 0, sizeof(hdr));
1632
1633 hdr.opcode = SD_OP_RELEASE_VDI;
1634 hdr.vdi_id = s->inode.vdi_id;
1635 wlen = strlen(s->name) + 1;
1636 hdr.data_length = wlen;
1637 hdr.flags = SD_FLAG_CMD_WRITE;
1638
1639 ret = do_req(fd, (SheepdogReq *)&hdr, s->name, &wlen, &rlen);
1640
1641 closesocket(fd);
1642
1643 if (!ret && rsp->result != SD_RES_SUCCESS &&
1644 rsp->result != SD_RES_VDI_NOT_LOCKED) {
1645 error_report("%s, %s", sd_strerror(rsp->result), s->name);
1646 }
1647
1648 qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
1649 closesocket(s->fd);
1650 g_free(s->host_spec);
1651 }
1652
1653 static int64_t sd_getlength(BlockDriverState *bs)
1654 {
1655 BDRVSheepdogState *s = bs->opaque;
1656
1657 return s->inode.vdi_size;
1658 }
1659
1660 static int sd_truncate(BlockDriverState *bs, int64_t offset)
1661 {
1662 BDRVSheepdogState *s = bs->opaque;
1663 int ret, fd;
1664 unsigned int datalen;
1665
1666 if (offset < s->inode.vdi_size) {
1667 error_report("shrinking is not supported");
1668 return -EINVAL;
1669 } else if (offset > SD_MAX_VDI_SIZE) {
1670 error_report("too big image size");
1671 return -EINVAL;
1672 }
1673
1674 fd = connect_to_sdog(s);
1675 if (fd < 0) {
1676 return fd;
1677 }
1678
1679 /* we don't need to update entire object */
1680 datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
1681 s->inode.vdi_size = offset;
1682 ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id),
1683 s->inode.nr_copies, datalen, 0, false, s->cache_flags);
1684 close(fd);
1685
1686 if (ret < 0) {
1687 error_report("failed to update an inode.");
1688 }
1689
1690 return ret;
1691 }
1692
/*
 * This function is called after writing data objects. If we need to
 * update metadata, this sends a write request to the vdi object.
 * Otherwise, this switches back to sd_co_readv/writev.
 */
static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
{
    int ret;
    BDRVSheepdogState *s = acb->common.bs->opaque;
    struct iovec iov;
    AIOReq *aio_req;
    uint32_t offset, data_len, mn, mx;

    mn = s->min_dirty_data_idx;
    mx = s->max_dirty_data_idx;
    /* mn > mx (UINT32_MAX > 0) means no data_vdi_id entry was dirtied */
    if (mn <= mx) {
        /* we need to update the vdi object. */
        offset = sizeof(s->inode) - sizeof(s->inode.data_vdi_id) +
            mn * sizeof(s->inode.data_vdi_id[0]);
        data_len = (mx - mn + 1) * sizeof(s->inode.data_vdi_id[0]);

        /* reset the dirty window before the inode write is in flight */
        s->min_dirty_data_idx = UINT32_MAX;
        s->max_dirty_data_idx = 0;

        iov.iov_base = &s->inode;
        iov.iov_len = sizeof(s->inode);
        aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
                                data_len, offset, 0, 0, offset);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        ret = add_aio_request(s, aio_req, &iov, 1, false, AIOCB_WRITE_UDATA);
        if (ret) {
            free_aio_req(s, aio_req);
            acb->ret = -EIO;
            goto out;
        }

        /* completion is deferred until the inode update finishes */
        acb->aio_done_func = sd_finish_aiocb;
        acb->aiocb_type = AIOCB_WRITE_UDATA;
        return;
    }
out:
    sd_finish_aiocb(acb);
}
1736
1737 /* Delete current working VDI on the snapshot chain */
1738 static bool sd_delete(BDRVSheepdogState *s)
1739 {
1740 unsigned int wlen = SD_MAX_VDI_LEN, rlen = 0;
1741 SheepdogVdiReq hdr = {
1742 .opcode = SD_OP_DEL_VDI,
1743 .vdi_id = s->inode.vdi_id,
1744 .data_length = wlen,
1745 .flags = SD_FLAG_CMD_WRITE,
1746 };
1747 SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr;
1748 int fd, ret;
1749
1750 fd = connect_to_sdog(s);
1751 if (fd < 0) {
1752 return false;
1753 }
1754
1755 ret = do_req(fd, (SheepdogReq *)&hdr, s->name, &wlen, &rlen);
1756 closesocket(fd);
1757 if (ret) {
1758 return false;
1759 }
1760 switch (rsp->result) {
1761 case SD_RES_NO_VDI:
1762 error_report("%s was already deleted", s->name);
1763 /* fall through */
1764 case SD_RES_SUCCESS:
1765 break;
1766 default:
1767 error_report("%s, %s", sd_strerror(rsp->result), s->name);
1768 return false;
1769 }
1770
1771 return true;
1772 }
1773
/*
 * Create a writable VDI from a snapshot
 */
static int sd_create_branch(BDRVSheepdogState *s)
{
    int ret, fd;
    uint32_t vid;
    char *buf;
    bool deleted;

    DPRINTF("%" PRIx32 " is snapshot.\n", s->inode.vdi_id);

    buf = g_malloc(SD_INODE_SIZE);

    /*
     * Even if deletion fails, we will just create an extra snapshot based
     * on the working VDI which was supposed to be deleted.  So there is
     * no need to bail out on failure here.
     */
    deleted = sd_delete(s);
    ret = do_sd_create(s, s->name, s->inode.vdi_size, s->inode.vdi_id, &vid,
                       !deleted, s->inode.copy_policy);
    if (ret) {
        goto out;
    }

    DPRINTF("%" PRIx32 " is created.\n", vid);

    fd = connect_to_sdog(s);
    if (fd < 0) {
        ret = fd;
        goto out;
    }

    /* read back the freshly created inode so s->inode matches the server */
    ret = read_object(fd, buf, vid_to_vdi_oid(vid), s->inode.nr_copies,
                      SD_INODE_SIZE, 0, s->cache_flags);

    closesocket(fd);

    if (ret < 0) {
        goto out;
    }

    memcpy(&s->inode, buf, sizeof(s->inode));

    /* the new working VDI is writable; no further branching needed */
    s->is_snapshot = false;
    ret = 0;
    DPRINTF("%" PRIx32 " was newly created.\n", s->inode.vdi_id);

out:
    g_free(buf);

    return ret;
}
1828
/*
 * Send I/O requests to the server.
 *
 * This function sends requests to the server, links the requests to
 * the inflight_list in BDRVSheepdogState, and exits without
 * waiting the response. The responses are received in the
 * `aio_read_response' function which is called from the main loop as
 * a fd handler.
 *
 * Returns 1 when we need to wait a response, 0 when there is no sent
 * request and -errno in error cases.
 */
static int coroutine_fn sd_co_rw_vector(void *p)
{
    SheepdogAIOCB *acb = p;
    int ret = 0;
    unsigned long len, done = 0, total = acb->nb_sectors * BDRV_SECTOR_SIZE;
    unsigned long idx = acb->sector_num * BDRV_SECTOR_SIZE / SD_DATA_OBJ_SIZE;
    uint64_t oid;
    uint64_t offset = (acb->sector_num * BDRV_SECTOR_SIZE) % SD_DATA_OBJ_SIZE;
    BDRVSheepdogState *s = acb->common.bs->opaque;
    SheepdogInode *inode = &s->inode;
    AIOReq *aio_req;

    if (acb->aiocb_type == AIOCB_WRITE_UDATA && s->is_snapshot) {
        /*
         * In the case we open the snapshot VDI, Sheepdog creates the
         * writable VDI when we do a write operation first.
         */
        ret = sd_create_branch(s);
        if (ret) {
            acb->ret = -EIO;
            goto out;
        }
    }

    /*
     * Make sure we don't free the aiocb before we are done with all requests.
     * This additional reference is dropped at the end of this function.
     */
    acb->nr_pending++;

    /* split the request into per-object chunks and submit each one */
    while (done != total) {
        uint8_t flags = 0;
        uint64_t old_oid = 0;
        bool create = false;

        oid = vid_to_data_oid(inode->data_vdi_id[idx], idx);

        /* length of this chunk, clipped to the end of the current object */
        len = MIN(total - done, SD_DATA_OBJ_SIZE - offset);

        switch (acb->aiocb_type) {
        case AIOCB_READ_UDATA:
            if (!inode->data_vdi_id[idx]) {
                /* an unallocated object reads back as zeroes */
                qemu_iovec_memset(acb->qiov, done, 0, len);
                goto done;
            }
            break;
        case AIOCB_WRITE_UDATA:
            if (!inode->data_vdi_id[idx]) {
                create = true;
            } else if (!is_data_obj_writable(inode, idx)) {
                /* Copy-On-Write */
                create = true;
                old_oid = oid;
                flags = SD_FLAG_CMD_COW;
            }
            break;
        case AIOCB_DISCARD_OBJ:
            /*
             * We discard the object only when the whole object is
             * 1) allocated 2) trimmed. Otherwise, simply skip it.
             */
            if (len != SD_DATA_OBJ_SIZE || inode->data_vdi_id[idx] == 0) {
                goto done;
            }
            break;
        default:
            break;
        }

        if (create) {
            DPRINTF("update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld\n",
                    inode->vdi_id, oid,
                    vid_to_data_oid(inode->data_vdi_id[idx], idx), idx);
            /* the new object is owned by this vdi, not the parent */
            oid = vid_to_data_oid(inode->vdi_id, idx);
            DPRINTF("new oid %" PRIx64 "\n", oid);
        }

        aio_req = alloc_aio_req(s, acb, oid, len, offset, flags, old_oid, done);

        if (create) {
            AIOReq *areq;
            QLIST_FOREACH(areq, &s->inflight_aio_head, aio_siblings) {
                if (areq->oid == oid) {
                    /*
                     * Sheepdog cannot handle simultaneous create
                     * requests to the same object. So we cannot send
                     * the request until the previous request
                     * finishes.
                     */
                    aio_req->flags = 0;
                    aio_req->base_oid = 0;
                    QLIST_INSERT_HEAD(&s->pending_aio_head, aio_req,
                                      aio_siblings);
                    goto done;
                }
            }
        }

        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        ret = add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
                              create, acb->aiocb_type);
        if (ret < 0) {
            error_report("add_aio_request is failed");
            free_aio_req(s, aio_req);
            acb->ret = -EIO;
            goto out;
        }
    done:
        offset = 0;
        idx++;
        done += len;
    }
out:
    /* drop the extra reference taken above */
    if (!--acb->nr_pending) {
        return acb->ret;
    }
    return 1;
}
1959
/*
 * Write @nb_sectors from @qiov at @sector_num.  On a growable device
 * the VDI is truncated (grown) first when the request extends past the
 * current end.
 */
static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
                                     int nb_sectors, QEMUIOVector *qiov)
{
    SheepdogAIOCB *acb;
    int ret;

    if (bs->growable && sector_num + nb_sectors > bs->total_sectors) {
        /* grow the device so the request fits */
        ret = sd_truncate(bs, (sector_num + nb_sectors) * BDRV_SECTOR_SIZE);
        if (ret < 0) {
            return ret;
        }
        bs->total_sectors = sector_num + nb_sectors;
    }

    acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
    acb->aio_done_func = sd_write_done;
    acb->aiocb_type = AIOCB_WRITE_UDATA;

    ret = sd_co_rw_vector(acb);
    if (ret <= 0) {
        /* nothing is in flight: no completion callback will fire */
        qemu_aio_release(acb);
        return ret;
    }

    /* woken by aio_read_response() once all requests completed */
    qemu_coroutine_yield();

    return acb->ret;
}
1988
1989 static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
1990 int nb_sectors, QEMUIOVector *qiov)
1991 {
1992 SheepdogAIOCB *acb;
1993 int ret;
1994
1995 acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
1996 acb->aiocb_type = AIOCB_READ_UDATA;
1997 acb->aio_done_func = sd_finish_aiocb;
1998
1999 ret = sd_co_rw_vector(acb);
2000 if (ret <= 0) {
2001 qemu_aio_release(acb);
2002 return ret;
2003 }
2004
2005 qemu_coroutine_yield();
2006
2007 return acb->ret;
2008 }
2009
/*
 * Flush the sheep's write-back cache for this VDI.  A no-op unless the
 * connection runs in SD_FLAG_CMD_CACHE (writeback) mode.
 */
static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
{
    BDRVSheepdogState *s = bs->opaque;
    SheepdogAIOCB *acb;
    AIOReq *aio_req;
    int ret;

    if (s->cache_flags != SD_FLAG_CMD_CACHE) {
        /* writes bypass the cache; there is nothing to flush */
        return 0;
    }

    acb = sd_aio_setup(bs, NULL, 0, 0);
    acb->aiocb_type = AIOCB_FLUSH_CACHE;
    acb->aio_done_func = sd_finish_aiocb;

    /* a single zero-length request against the vdi object */
    aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
                            0, 0, 0, 0, 0);
    QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
    ret = add_aio_request(s, aio_req, NULL, 0, false, acb->aiocb_type);
    if (ret < 0) {
        error_report("add_aio_request is failed");
        free_aio_req(s, aio_req);
        qemu_aio_release(acb);
        return ret;
    }

    qemu_coroutine_yield();
    return acb->ret;
}
2039
2040 static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
2041 {
2042 BDRVSheepdogState *s = bs->opaque;
2043 int ret, fd;
2044 uint32_t new_vid;
2045 SheepdogInode *inode;
2046 unsigned int datalen;
2047
2048 DPRINTF("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " "
2049 "is_snapshot %d\n", sn_info->name, sn_info->id_str,
2050 s->name, sn_info->vm_state_size, s->is_snapshot);
2051
2052 if (s->is_snapshot) {
2053 error_report("You can't create a snapshot of a snapshot VDI, "
2054 "%s (%" PRIu32 ").", s->name, s->inode.vdi_id);
2055
2056 return -EINVAL;
2057 }
2058
2059 DPRINTF("%s %s\n", sn_info->name, sn_info->id_str);
2060
2061 s->inode.vm_state_size = sn_info->vm_state_size;
2062 s->inode.vm_clock_nsec = sn_info->vm_clock_nsec;
2063 /* It appears that inode.tag does not require a NUL terminator,
2064 * which means this use of strncpy is ok.
2065 */
2066 strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag));
2067 /* we don't need to update entire object */
2068 datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
2069
2070 /* refresh inode. */
2071 fd = connect_to_sdog(s);
2072 if (fd < 0) {
2073 ret = fd;
2074 goto cleanup;
2075 }
2076
2077 ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id),
2078 s->inode.nr_copies, datalen, 0, false, s->cache_flags);
2079 if (ret < 0) {
2080 error_report("failed to write snapshot's inode.");
2081 goto cleanup;
2082 }
2083
2084 ret = do_sd_create(s, s->name, s->inode.vdi_size, s->inode.vdi_id, &new_vid,
2085 1, s->inode.copy_policy);
2086 if (ret < 0) {
2087 error_report("failed to create inode for snapshot. %s",
2088 strerror(errno));
2089 goto cleanup;
2090 }
2091
2092 inode = (SheepdogInode *)g_malloc(datalen);
2093
2094 ret = read_object(fd, (char *)inode, vid_to_vdi_oid(new_vid),
2095 s->inode.nr_copies, datalen, 0, s->cache_flags);
2096
2097 if (ret < 0) {
2098 error_report("failed to read new inode info. %s", strerror(errno));
2099 goto cleanup;
2100 }
2101
2102 memcpy(&s->inode, inode, datalen);
2103 DPRINTF("s->inode: name %s snap_id %x oid %x\n",
2104 s->inode.name, s->inode.snap_id, s->inode.vdi_id);
2105
2106 cleanup:
2107 closesocket(fd);
2108 return ret;
2109 }
2110
/*
 * We implement rollback(loadvm) operation to the specified snapshot by
 * 1) switch to the snapshot
 * 2) rely on sd_create_branch to delete working VDI and
 * 3) create a new working VDI based on the specified snapshot
 */
static int sd_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
{
    BDRVSheepdogState *s = bs->opaque;
    BDRVSheepdogState *old_s;
    char tag[SD_MAX_VDI_TAG_LEN];
    uint32_t snapid = 0;
    int ret = 0;

    /* snapshot the whole driver state so we can roll back on failure */
    old_s = g_malloc(sizeof(BDRVSheepdogState));

    memcpy(old_s, s, sizeof(BDRVSheepdogState));

    /* a numeric id selects by snapshot id, otherwise select by tag name */
    snapid = strtoul(snapshot_id, NULL, 10);
    if (snapid) {
        tag[0] = 0;
    } else {
        pstrcpy(tag, sizeof(tag), snapshot_id);
    }

    ret = reload_inode(s, snapid, tag);
    if (ret) {
        goto out;
    }

    ret = sd_create_branch(s);
    if (ret) {
        goto out;
    }

    g_free(old_s);

    return 0;
out:
    /* recover bdrv_sd_state */
    memcpy(s, old_s, sizeof(BDRVSheepdogState));
    g_free(old_s);

    error_report("failed to open. recover old bdrv_sd_state.");

    return ret;
}
2158
/* Snapshot deletion is not implemented yet; returning 0 keeps callers
 * (e.g. delvm) from failing hard. */
static int sd_snapshot_delete(BlockDriverState *bs,
                              const char *snapshot_id,
                              const char *name,
                              Error **errp)
{
    /* FIXME: Delete specified snapshot id. */
    return 0;
}
2167
2168 static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
2169 {
2170 BDRVSheepdogState *s = bs->opaque;
2171 SheepdogReq req;
2172 int fd, nr = 1024, ret, max = BITS_TO_LONGS(SD_NR_VDIS) * sizeof(long);
2173 QEMUSnapshotInfo *sn_tab = NULL;
2174 unsigned wlen, rlen;
2175 int found = 0;
2176 static SheepdogInode inode;
2177 unsigned long *vdi_inuse;
2178 unsigned int start_nr;
2179 uint64_t hval;
2180 uint32_t vid;
2181
2182 vdi_inuse = g_malloc(max);
2183
2184 fd = connect_to_sdog(s);
2185 if (fd < 0) {
2186 ret = fd;
2187 goto out;
2188 }
2189
2190 rlen = max;
2191 wlen = 0;
2192
2193 memset(&req, 0, sizeof(req));
2194
2195 req.opcode = SD_OP_READ_VDIS;
2196 req.data_length = max;
2197
2198 ret = do_req(fd, (SheepdogReq *)&req, vdi_inuse, &wlen, &rlen);
2199
2200 closesocket(fd);
2201 if (ret) {
2202 goto out;
2203 }
2204
2205 sn_tab = g_malloc0(nr * sizeof(*sn_tab));
2206
2207 /* calculate a vdi id with hash function */
2208 hval = fnv_64a_buf(s->name, strlen(s->name), FNV1A_64_INIT);
2209 start_nr = hval & (SD_NR_VDIS - 1);
2210
2211 fd = connect_to_sdog(s);
2212 if (fd < 0) {
2213 ret = fd;
2214 goto out;
2215 }
2216
2217 for (vid = start_nr; found < nr; vid = (vid + 1) % SD_NR_VDIS) {
2218 if (!test_bit(vid, vdi_inuse)) {
2219 break;
2220 }
2221
2222 /* we don't need to read entire object */
2223 ret = read_object(fd, (char *)&inode, vid_to_vdi_oid(vid),
2224 0, SD_INODE_SIZE - sizeof(inode.data_vdi_id), 0,
2225 s->cache_flags);
2226
2227 if (ret) {
2228 continue;
2229 }
2230
2231 if (!strcmp(inode.name, s->name) && is_snapshot(&inode)) {
2232 sn_tab[found].date_sec = inode.snap_ctime >> 32;
2233 sn_tab[found].date_nsec = inode.snap_ctime & 0xffffffff;
2234 sn_tab[found].vm_state_size = inode.vm_state_size;
2235 sn_tab[found].vm_clock_nsec = inode.vm_clock_nsec;
2236
2237 snprintf(sn_tab[found].id_str, sizeof(sn_tab[found].id_str), "%u",
2238 inode.snap_id);
2239 pstrcpy(sn_tab[found].name,
2240 MIN(sizeof(sn_tab[found].name), sizeof(inode.tag)),
2241 inode.tag);
2242 found++;
2243 }
2244 }
2245
2246 closesocket(fd);
2247 out:
2248 *psn_tab = sn_tab;
2249
2250 g_free(vdi_inuse);
2251
2252 if (ret < 0) {
2253 return ret;
2254 }
2255
2256 return found;
2257 }
2258
2259 static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data,
2260 int64_t pos, int size, int load)
2261 {
2262 bool create;
2263 int fd, ret = 0, remaining = size;
2264 unsigned int data_len;
2265 uint64_t vmstate_oid;
2266 uint64_t offset;
2267 uint32_t vdi_index;
2268 uint32_t vdi_id = load ? s->inode.parent_vdi_id : s->inode.vdi_id;
2269
2270 fd = connect_to_sdog(s);
2271 if (fd < 0) {
2272 return fd;
2273 }
2274
2275 while (remaining) {
2276 vdi_index = pos / SD_DATA_OBJ_SIZE;
2277 offset = pos % SD_DATA_OBJ_SIZE;
2278
2279 data_len = MIN(remaining, SD_DATA_OBJ_SIZE - offset);
2280
2281 vmstate_oid = vid_to_vmstate_oid(vdi_id, vdi_index);
2282
2283 create = (offset == 0);
2284 if (load) {
2285 ret = read_object(fd, (char *)data, vmstate_oid,
2286 s->inode.nr_copies, data_len, offset,
2287 s->cache_flags);
2288 } else {
2289 ret = write_object(fd, (char *)data, vmstate_oid,
2290 s->inode.nr_copies, data_len, offset, create,
2291 s->cache_flags);
2292 }
2293
2294 if (ret < 0) {
2295 error_report("failed to save vmstate %s", strerror(errno));
2296 goto cleanup;
2297 }
2298
2299 pos += data_len;
2300 data += data_len;
2301 remaining -= data_len;
2302 }
2303 ret = size;
2304 cleanup:
2305 closesocket(fd);
2306 return ret;
2307 }
2308
2309 static int sd_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
2310 int64_t pos)
2311 {
2312 BDRVSheepdogState *s = bs->opaque;
2313 void *buf;
2314 int ret;
2315
2316 buf = qemu_blockalign(bs, qiov->size);
2317 qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
2318 ret = do_load_save_vmstate(s, (uint8_t *) buf, pos, qiov->size, 0);
2319 qemu_vfree(buf);
2320
2321 return ret;
2322 }
2323
2324 static int sd_load_vmstate(BlockDriverState *bs, uint8_t *data,
2325 int64_t pos, int size)
2326 {
2327 BDRVSheepdogState *s = bs->opaque;
2328
2329 return do_load_save_vmstate(s, data, pos, size, 1);
2330 }
2331
2332
/*
 * Discard (trim) whole data objects covered by the request; partially
 * covered objects are skipped inside sd_co_rw_vector().
 */
static coroutine_fn int sd_co_discard(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors)
{
    SheepdogAIOCB *acb;
    QEMUIOVector dummy;
    BDRVSheepdogState *s = bs->opaque;
    int ret;

    if (!s->discard_supported) {
        return 0;
    }

    /* NOTE(review): 'dummy' is passed uninitialized; the discard path
     * appears never to dereference acb->qiov's data — confirm before
     * refactoring this. */
    acb = sd_aio_setup(bs, &dummy, sector_num, nb_sectors);
    acb->aiocb_type = AIOCB_DISCARD_OBJ;
    acb->aio_done_func = sd_finish_aiocb;

    ret = sd_co_rw_vector(acb);
    if (ret <= 0) {
        /* nothing in flight: no completion callback will fire */
        qemu_aio_release(acb);
        return ret;
    }

    qemu_coroutine_yield();

    return acb->ret;
}
2359
2360 static coroutine_fn int64_t
2361 sd_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2362 int *pnum)
2363 {
2364 BDRVSheepdogState *s = bs->opaque;
2365 SheepdogInode *inode = &s->inode;
2366 unsigned long start = sector_num * BDRV_SECTOR_SIZE / SD_DATA_OBJ_SIZE,
2367 end = DIV_ROUND_UP((sector_num + nb_sectors) *
2368 BDRV_SECTOR_SIZE, SD_DATA_OBJ_SIZE);
2369 unsigned long idx;
2370 int64_t ret = BDRV_BLOCK_DATA;
2371
2372 for (idx = start; idx < end; idx++) {
2373 if (inode->data_vdi_id[idx] == 0) {
2374 break;
2375 }
2376 }
2377 if (idx == start) {
2378 /* Get the longest length of unallocated sectors */
2379 ret = 0;
2380 for (idx = start + 1; idx < end; idx++) {
2381 if (inode->data_vdi_id[idx] != 0) {
2382 break;
2383 }
2384 }
2385 }
2386
2387 *pnum = (idx - start) * SD_DATA_OBJ_SIZE / BDRV_SECTOR_SIZE;
2388 if (*pnum > nb_sectors) {
2389 *pnum = nb_sectors;
2390 }
2391 return ret;
2392 }
2393
/* Options accepted by sd_create() (e.g. qemu-img create -o ...) */
static QEMUOptionParameter sd_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};
2412
/*
 * Protocol driver for plain "sheepdog:..." URIs.  The three sheepdog
 * drivers in this file share the same callbacks and differ only in the
 * protocol_name they claim.
 */
static BlockDriver bdrv_sheepdog = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .create_options = sd_create_options,
};
2441
/* Same driver as bdrv_sheepdog, registered for explicit "sheepdog+tcp" URIs. */
static BlockDriver bdrv_sheepdog_tcp = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog+tcp",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .create_options = sd_create_options,
};
2470
/* Same driver again, registered for "sheepdog+unix" (UNIX-socket) URIs. */
static BlockDriver bdrv_sheepdog_unix = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog+unix",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .create_options = sd_create_options,
};
2499
2500 static void bdrv_sheepdog_init(void)
2501 {
2502 bdrv_register(&bdrv_sheepdog);
2503 bdrv_register(&bdrv_sheepdog_tcp);
2504 bdrv_register(&bdrv_sheepdog_unix);
2505 }
2506 block_init(bdrv_sheepdog_init);