]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 2009-2010 Nippon Telegraph and Telephone Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License version | |
6 | * 2 as published by the Free Software Foundation. | |
7 | * | |
8 | * You should have received a copy of the GNU General Public License | |
9 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
10 | * | |
11 | * Contributions after 2012-01-13 are licensed under the terms of the | |
12 | * GNU GPL, version 2 or (at your option) any later version. | |
13 | */ | |
14 | ||
15 | #include "qemu-common.h" | |
16 | #include "qemu/uri.h" | |
17 | #include "qemu/error-report.h" | |
18 | #include "qemu/sockets.h" | |
19 | #include "block/block_int.h" | |
20 | #include "qemu/bitops.h" | |
21 | ||
/* Wire-protocol version this driver speaks. */
#define SD_PROTO_VER 0x01

/* Server location used when the filename/URI does not specify one. */
#define SD_DEFAULT_ADDR "localhost"
#define SD_DEFAULT_PORT 7000

/* Object operations (opcode field of SheepdogObjReq). */
#define SD_OP_CREATE_AND_WRITE_OBJ 0x01
#define SD_OP_READ_OBJ 0x02
#define SD_OP_WRITE_OBJ 0x03
/* 0x04 is used internally by Sheepdog */
#define SD_OP_DISCARD_OBJ 0x05

/* VDI (virtual disk image) operations (opcode field of SheepdogVdiReq). */
#define SD_OP_NEW_VDI 0x11
#define SD_OP_LOCK_VDI 0x12
#define SD_OP_RELEASE_VDI 0x13
#define SD_OP_GET_VDI_INFO 0x14
#define SD_OP_READ_VDIS 0x15
#define SD_OP_FLUSH_VDI 0x16
#define SD_OP_DEL_VDI 0x17

/* Request flag bits (flags field of the request headers). */
#define SD_FLAG_CMD_WRITE 0x01
#define SD_FLAG_CMD_COW 0x02
#define SD_FLAG_CMD_CACHE 0x04 /* Writeback mode for cache */
#define SD_FLAG_CMD_DIRECT 0x08 /* Don't use cache */

/* Result codes returned by the server (result field of the responses). */
#define SD_RES_SUCCESS 0x00 /* Success */
#define SD_RES_UNKNOWN 0x01 /* Unknown error */
#define SD_RES_NO_OBJ 0x02 /* No object found */
#define SD_RES_EIO 0x03 /* I/O error */
#define SD_RES_VDI_EXIST 0x04 /* Vdi exists already */
#define SD_RES_INVALID_PARMS 0x05 /* Invalid parameters */
#define SD_RES_SYSTEM_ERROR 0x06 /* System error */
#define SD_RES_VDI_LOCKED 0x07 /* Vdi is locked */
#define SD_RES_NO_VDI 0x08 /* No vdi found */
#define SD_RES_NO_BASE_VDI 0x09 /* No base vdi found */
#define SD_RES_VDI_READ 0x0A /* Cannot read requested vdi */
#define SD_RES_VDI_WRITE 0x0B /* Cannot write requested vdi */
#define SD_RES_BASE_VDI_READ 0x0C /* Cannot read base vdi */
#define SD_RES_BASE_VDI_WRITE 0x0D /* Cannot write base vdi */
#define SD_RES_NO_TAG 0x0E /* Requested tag is not found */
#define SD_RES_STARTUP 0x0F /* Sheepdog is on starting up */
#define SD_RES_VDI_NOT_LOCKED 0x10 /* Vdi is not locked */
#define SD_RES_SHUTDOWN 0x11 /* Sheepdog is shutting down */
#define SD_RES_NO_MEM 0x12 /* Cannot allocate memory */
#define SD_RES_FULL_VDI 0x13 /* we already have the maximum vdis */
#define SD_RES_VER_MISMATCH 0x14 /* Protocol version mismatch */
#define SD_RES_NO_SPACE 0x15 /* Server has no room for new objects */
#define SD_RES_WAIT_FOR_FORMAT 0x16 /* Waiting for a format operation */
#define SD_RES_WAIT_FOR_JOIN 0x17 /* Waiting for other nodes joining */
#define SD_RES_JOIN_FAILED 0x18 /* Target node had failed to join sheepdog */
#define SD_RES_HALT 0x19 /* Sheepdog is stopped serving IO request */
#define SD_RES_READONLY 0x1A /* Object is read-only */

/*
 * Object ID rules
 *
 *  0 - 19 (20 bits): data object space
 * 20 - 31 (12 bits): reserved data object space
 * 32 - 55 (24 bits): vdi object space
 * 56 - 59 ( 4 bits): reserved vdi object space
 * 60 - 63 ( 4 bits): object type identifier space
 */

#define VDI_SPACE_SHIFT 32
#define VDI_BIT (UINT64_C(1) << 63)     /* set in oids of vdi (metadata) objects */
#define VMSTATE_BIT (UINT64_C(1) << 62) /* set in oids of vmstate objects */
#define MAX_DATA_OBJS (UINT64_C(1) << 20)
#define MAX_CHILDREN 1024
#define SD_MAX_VDI_LEN 256
#define SD_MAX_VDI_TAG_LEN 256
#define SD_NR_VDIS   (1U << 24)
#define SD_DATA_OBJ_SIZE (UINT64_C(1) << 22)
#define SD_MAX_VDI_SIZE (SD_DATA_OBJ_SIZE * MAX_DATA_OBJS)
/*
 * For erasure coding, we use at most SD_EC_MAX_STRIP for data strips and
 * (SD_EC_MAX_STRIP - 1) for parity strips
 *
 * SD_MAX_COPIES is sum of number of data strips and parity strips.
 */
#define SD_EC_MAX_STRIP 16
#define SD_MAX_COPIES (SD_EC_MAX_STRIP * 2 - 1)

#define SD_INODE_SIZE (sizeof(SheepdogInode))
#define CURRENT_VDI_ID 0

/* Lock types for SD_OP_LOCK_VDI. */
#define LOCK_TYPE_NORMAL 0
#define LOCK_TYPE_SHARED 1      /* for iSCSI multipath */
/*
 * Generic request header.  All request/response structures below share
 * the same leading fields (proto_ver through data_length); the trailing
 * words are opcode specific.
 */
typedef struct SheepdogReq {
    uint8_t proto_ver;          /* protocol version (SD_PROTO_VER) */
    uint8_t opcode;             /* one of the SD_OP_* values */
    uint16_t flags;             /* SD_FLAG_CMD_* bits */
    uint32_t epoch;
    uint32_t id;                /* request id, echoed back in the response */
    uint32_t data_length;       /* number of payload bytes after the header */
    uint32_t opcode_specific[8];
} SheepdogReq;

/* Generic response header; result carries an SD_RES_* code. */
typedef struct SheepdogRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;                /* matches the id of the original request */
    uint32_t data_length;
    uint32_t result;            /* SD_RES_* */
    uint32_t opcode_specific[7];
} SheepdogRsp;

/* Request header for object (data) operations. */
typedef struct SheepdogObjReq {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint64_t oid;               /* target object id */
    uint64_t cow_oid;           /* base object for copy-on-write requests */
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[6];
    uint64_t offset;            /* byte offset within the object */
} SheepdogObjReq;

/* Response header for object operations. */
typedef struct SheepdogObjRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint32_t result;            /* SD_RES_* */
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[2];
    uint32_t pad[6];
} SheepdogObjRsp;
158 | ||
/* Request header for vdi-level operations (SD_OP_NEW_VDI etc.). */
typedef struct SheepdogVdiReq {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint64_t vdi_size;          /* size of the vdi in bytes */
    uint32_t base_vdi_id;       /* parent vdi for snapshots/clones */
    uint8_t copies;
    uint8_t copy_policy;
    uint8_t reserved[2];
    uint32_t snapid;            /* snapshot id to operate on (0 = current) */
    uint32_t type;              /* lock type, LOCK_TYPE_* */
    uint32_t pad[2];
} SheepdogVdiReq;

/* Response header for vdi-level operations; vdi_id identifies the vdi. */
typedef struct SheepdogVdiRsp {
    uint8_t proto_ver;
    uint8_t opcode;
    uint16_t flags;
    uint32_t epoch;
    uint32_t id;
    uint32_t data_length;
    uint32_t result;            /* SD_RES_* */
    uint32_t rsvd;
    uint32_t vdi_id;
    uint32_t pad[5];
} SheepdogVdiRsp;
188 | ||
/*
 * On-disk/on-wire layout of a vdi (metadata) object.  data_vdi_id[] maps
 * each data object index to the id of the vdi that owns that object;
 * 0 means the object has not been created yet.
 */
typedef struct SheepdogInode {
    char name[SD_MAX_VDI_LEN];
    char tag[SD_MAX_VDI_TAG_LEN];
    uint64_t ctime;
    uint64_t snap_ctime;        /* non-zero iff this inode is a snapshot */
    uint64_t vm_clock_nsec;
    uint64_t vdi_size;
    uint64_t vm_state_size;
    uint16_t copy_policy;
    uint8_t nr_copies;
    uint8_t block_size_shift;
    uint32_t snap_id;
    uint32_t vdi_id;
    uint32_t parent_vdi_id;
    uint32_t child_vdi_id[MAX_CHILDREN];
    uint32_t data_vdi_id[MAX_DATA_OBJS];
} SheepdogInode;

/* Size of the inode without the (large) data_vdi_id[] table. */
#define SD_INODE_HEADER_SIZE offsetof(SheepdogInode, data_vdi_id)

/*
 * 64 bit FNV-1a non-zero initial basis
 */
#define FNV1A_64_INIT ((uint64_t)0xcbf29ce484222325ULL)
213 | ||
/*
 * 64 bit Fowler/Noll/Vo FNV-1a hash code
 *
 * Folds the 'len' bytes at 'buf' into the running hash value 'hval'
 * (use FNV1A_64_INIT for the first call) and returns the new value.
 * The multiply by the 64-bit FNV prime 0x100000001b3 is written as a
 * sum of shifts.  The buffer is read-only, hence const.
 */
static inline uint64_t fnv_64a_buf(const void *buf, size_t len, uint64_t hval)
{
    const unsigned char *bp = buf;
    const unsigned char *be = bp + len;
    while (bp < be) {
        hval ^= (uint64_t) *bp++;
        hval += (hval << 1) + (hval << 4) + (hval << 5) +
            (hval << 7) + (hval << 8) + (hval << 40);
    }
    return hval;
}
228 | ||
229 | static inline bool is_data_obj_writable(SheepdogInode *inode, unsigned int idx) | |
230 | { | |
231 | return inode->vdi_id == inode->data_vdi_id[idx]; | |
232 | } | |
233 | ||
234 | static inline bool is_data_obj(uint64_t oid) | |
235 | { | |
236 | return !(VDI_BIT & oid); | |
237 | } | |
238 | ||
239 | static inline uint64_t data_oid_to_idx(uint64_t oid) | |
240 | { | |
241 | return oid & (MAX_DATA_OBJS - 1); | |
242 | } | |
243 | ||
244 | static inline uint32_t oid_to_vid(uint64_t oid) | |
245 | { | |
246 | return (oid & ~VDI_BIT) >> VDI_SPACE_SHIFT; | |
247 | } | |
248 | ||
249 | static inline uint64_t vid_to_vdi_oid(uint32_t vid) | |
250 | { | |
251 | return VDI_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT); | |
252 | } | |
253 | ||
254 | static inline uint64_t vid_to_vmstate_oid(uint32_t vid, uint32_t idx) | |
255 | { | |
256 | return VMSTATE_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT) | idx; | |
257 | } | |
258 | ||
259 | static inline uint64_t vid_to_data_oid(uint32_t vid, uint32_t idx) | |
260 | { | |
261 | return ((uint64_t)vid << VDI_SPACE_SHIFT) | idx; | |
262 | } | |
263 | ||
264 | static inline bool is_snapshot(struct SheepdogInode *inode) | |
265 | { | |
266 | return !!inode->snap_ctime; | |
267 | } | |
268 | ||
/* Debug tracing: compiled in only when DEBUG_SDOG is defined. */
#undef DPRINTF
#ifdef DEBUG_SDOG
#define DPRINTF(fmt, args...)                                       \
    do {                                                            \
        fprintf(stdout, "%s %d: " fmt, __func__, __LINE__, ##args); \
    } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
278 | ||
typedef struct SheepdogAIOCB SheepdogAIOCB;

/*
 * One outstanding request to the server.  Every AIOReq is linked on
 * exactly one of the BDRVSheepdogState queues (inflight/pending/failed)
 * and points back at the AIOCB it belongs to.
 */
typedef struct AIOReq {
    SheepdogAIOCB *aiocb;       /* owning aio operation */
    unsigned int iov_offset;    /* offset of this request within acb->qiov */

    uint64_t oid;               /* target object */
    uint64_t base_oid;          /* base object for copy-on-write */
    uint64_t offset;            /* byte offset within the object */
    unsigned int data_len;
    uint8_t flags;              /* SD_FLAG_CMD_* */
    uint32_t id;                /* request id used to match the response */
    bool create;                /* true when the object must be created */

    QLIST_ENTRY(AIOReq) aio_siblings;
} AIOReq;
295 | ||
/* What kind of operation an AIOCB represents. */
enum AIOCBState {
    AIOCB_WRITE_UDATA,          /* write of user data objects */
    AIOCB_READ_UDATA,           /* read of user data objects */
    AIOCB_FLUSH_CACHE,          /* flush the server-side cache */
    AIOCB_DISCARD_OBJ,          /* discard (unmap) data objects */
};
302 | ||
/*
 * Driver-level aio control block.  One AIOCB may fan out into several
 * AIOReqs; nr_pending counts the ones not yet completed.
 */
struct SheepdogAIOCB {
    BlockDriverAIOCB common;

    QEMUIOVector *qiov;         /* caller's scatter/gather list */

    int64_t sector_num;
    int nb_sectors;

    int ret;                    /* 0, or -EIO once any request failed */
    enum AIOCBState aiocb_type;

    Coroutine *coroutine;       /* coroutine to resume on completion */
    void (*aio_done_func)(SheepdogAIOCB *); /* called when nr_pending drops to 0 */

    bool cancelable;            /* cleared once a request has completed */
    bool *finished;             /* set by sd_finish_aiocb for sd_aio_cancel */
    int nr_pending;             /* outstanding AIOReqs for this AIOCB */
};
321 | ||
/* Per-image driver state (BlockDriverState.opaque). */
typedef struct BDRVSheepdogState {
    BlockDriverState *bs;
    AioContext *aio_context;

    SheepdogInode inode;        /* cached copy of the vdi object */

    /* Bounds of the data_vdi_id[] indices updated by newly created
     * objects (maintained in aio_read_response). */
    uint32_t min_dirty_data_idx;
    uint32_t max_dirty_data_idx;

    char name[SD_MAX_VDI_LEN];
    bool is_snapshot;
    uint32_t cache_flags;       /* SD_FLAG_CMD_CACHE / SD_FLAG_CMD_DIRECT */
    bool discard_supported;     /* cleared when the server rejects discard */

    char *host_spec;            /* "host:port" or unix socket path */
    bool is_unix;               /* true for sheepdog+unix:// */
    int fd;                     /* socket used for object I/O */

    CoMutex lock;
    Coroutine *co_send;         /* coroutine currently sending a request */
    Coroutine *co_recv;         /* coroutine currently reading a response */

    uint32_t aioreq_seq_num;    /* source of unique AIOReq ids */

    /* Every aio request must be linked to either of these queues. */
    QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
    QLIST_HEAD(pending_aio_head, AIOReq) pending_aio_head;
    QLIST_HEAD(failed_aio_head, AIOReq) failed_aio_head;
} BDRVSheepdogState;
351 | ||
352 | static const char * sd_strerror(int err) | |
353 | { | |
354 | int i; | |
355 | ||
356 | static const struct { | |
357 | int err; | |
358 | const char *desc; | |
359 | } errors[] = { | |
360 | {SD_RES_SUCCESS, "Success"}, | |
361 | {SD_RES_UNKNOWN, "Unknown error"}, | |
362 | {SD_RES_NO_OBJ, "No object found"}, | |
363 | {SD_RES_EIO, "I/O error"}, | |
364 | {SD_RES_VDI_EXIST, "VDI exists already"}, | |
365 | {SD_RES_INVALID_PARMS, "Invalid parameters"}, | |
366 | {SD_RES_SYSTEM_ERROR, "System error"}, | |
367 | {SD_RES_VDI_LOCKED, "VDI is already locked"}, | |
368 | {SD_RES_NO_VDI, "No vdi found"}, | |
369 | {SD_RES_NO_BASE_VDI, "No base VDI found"}, | |
370 | {SD_RES_VDI_READ, "Failed read the requested VDI"}, | |
371 | {SD_RES_VDI_WRITE, "Failed to write the requested VDI"}, | |
372 | {SD_RES_BASE_VDI_READ, "Failed to read the base VDI"}, | |
373 | {SD_RES_BASE_VDI_WRITE, "Failed to write the base VDI"}, | |
374 | {SD_RES_NO_TAG, "Failed to find the requested tag"}, | |
375 | {SD_RES_STARTUP, "The system is still booting"}, | |
376 | {SD_RES_VDI_NOT_LOCKED, "VDI isn't locked"}, | |
377 | {SD_RES_SHUTDOWN, "The system is shutting down"}, | |
378 | {SD_RES_NO_MEM, "Out of memory on the server"}, | |
379 | {SD_RES_FULL_VDI, "We already have the maximum vdis"}, | |
380 | {SD_RES_VER_MISMATCH, "Protocol version mismatch"}, | |
381 | {SD_RES_NO_SPACE, "Server has no space for new objects"}, | |
382 | {SD_RES_WAIT_FOR_FORMAT, "Sheepdog is waiting for a format operation"}, | |
383 | {SD_RES_WAIT_FOR_JOIN, "Sheepdog is waiting for other nodes joining"}, | |
384 | {SD_RES_JOIN_FAILED, "Target node had failed to join sheepdog"}, | |
385 | {SD_RES_HALT, "Sheepdog is stopped serving IO request"}, | |
386 | {SD_RES_READONLY, "Object is read-only"}, | |
387 | }; | |
388 | ||
389 | for (i = 0; i < ARRAY_SIZE(errors); ++i) { | |
390 | if (errors[i].err == err) { | |
391 | return errors[i].desc; | |
392 | } | |
393 | } | |
394 | ||
395 | return "Invalid error code"; | |
396 | } | |
397 | ||
398 | /* | |
399 | * Sheepdog I/O handling: | |
400 | * | |
401 | * 1. In sd_co_rw_vector, we send the I/O requests to the server and | |
402 | * link the requests to the inflight_list in the | |
403 | * BDRVSheepdogState. The function exits without waiting for | |
404 | * receiving the response. | |
405 | * | |
406 | * 2. We receive the response in aio_read_response, the fd handler to | |
407 | * the sheepdog connection. If metadata update is needed, we send | |
408 | * the write request to the vdi object in sd_write_done, the write | |
409 | * completion function. We switch back to sd_co_readv/writev after | |
410 | * all the requests belonging to the AIOCB are finished. | |
411 | */ | |
412 | ||
413 | static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb, | |
414 | uint64_t oid, unsigned int data_len, | |
415 | uint64_t offset, uint8_t flags, bool create, | |
416 | uint64_t base_oid, unsigned int iov_offset) | |
417 | { | |
418 | AIOReq *aio_req; | |
419 | ||
420 | aio_req = g_malloc(sizeof(*aio_req)); | |
421 | aio_req->aiocb = acb; | |
422 | aio_req->iov_offset = iov_offset; | |
423 | aio_req->oid = oid; | |
424 | aio_req->base_oid = base_oid; | |
425 | aio_req->offset = offset; | |
426 | aio_req->data_len = data_len; | |
427 | aio_req->flags = flags; | |
428 | aio_req->id = s->aioreq_seq_num++; | |
429 | aio_req->create = create; | |
430 | ||
431 | acb->nr_pending++; | |
432 | return aio_req; | |
433 | } | |
434 | ||
435 | static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req) | |
436 | { | |
437 | SheepdogAIOCB *acb = aio_req->aiocb; | |
438 | ||
439 | acb->cancelable = false; | |
440 | QLIST_REMOVE(aio_req, aio_siblings); | |
441 | g_free(aio_req); | |
442 | ||
443 | acb->nr_pending--; | |
444 | } | |
445 | ||
/*
 * Complete an AIOCB: resume the coroutine that issued it, report
 * completion to a waiting sd_aio_cancel (via *finished), and release
 * the control block.
 */
static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
{
    qemu_coroutine_enter(acb->coroutine, NULL);
    if (acb->finished) {
        *acb->finished = true;
    }
    qemu_aio_release(acb);
}
454 | ||
455 | /* | |
456 | * Check whether the specified acb can be canceled | |
457 | * | |
458 | * We can cancel aio when any request belonging to the acb is: | |
459 | * - Not processed by the sheepdog server. | |
460 | * - Not linked to the inflight queue. | |
461 | */ | |
462 | static bool sd_acb_cancelable(const SheepdogAIOCB *acb) | |
463 | { | |
464 | BDRVSheepdogState *s = acb->common.bs->opaque; | |
465 | AIOReq *aioreq; | |
466 | ||
467 | if (!acb->cancelable) { | |
468 | return false; | |
469 | } | |
470 | ||
471 | QLIST_FOREACH(aioreq, &s->inflight_aio_head, aio_siblings) { | |
472 | if (aioreq->aiocb == acb) { | |
473 | return false; | |
474 | } | |
475 | } | |
476 | ||
477 | return true; | |
478 | } | |
479 | ||
/*
 * Cancel an in-flight AIOCB.  Polls the AioContext until either the
 * operation becomes cancelable (then drops its queued requests and
 * finishes it) or it completes on its own (signalled through
 * acb->finished by sd_finish_aiocb).
 */
static void sd_aio_cancel(BlockDriverAIOCB *blockacb)
{
    SheepdogAIOCB *acb = (SheepdogAIOCB *)blockacb;
    BDRVSheepdogState *s = acb->common.bs->opaque;
    AIOReq *aioreq, *next;
    bool finished = false;

    acb->finished = &finished;
    while (!finished) {
        if (sd_acb_cancelable(acb)) {
            /* Remove outstanding requests from pending and failed queues.  */
            QLIST_FOREACH_SAFE(aioreq, &s->pending_aio_head, aio_siblings,
                               next) {
                if (aioreq->aiocb == acb) {
                    free_aio_req(s, aioreq);
                }
            }
            QLIST_FOREACH_SAFE(aioreq, &s->failed_aio_head, aio_siblings,
                               next) {
                if (aioreq->aiocb == acb) {
                    free_aio_req(s, aioreq);
                }
            }

            /* sd_acb_cancelable() guarantees nothing is left inflight,
             * so all requests of this acb must be gone now. */
            assert(acb->nr_pending == 0);
            sd_finish_aiocb(acb);
            return;
        }
        /* Let pending I/O make progress, then re-check. */
        aio_poll(s->aio_context, true);
    }
}
511 | ||
/* AIOCB descriptor handed to qemu_aio_get() in sd_aio_setup(). */
static const AIOCBInfo sd_aiocb_info = {
    .aiocb_size = sizeof(SheepdogAIOCB),
    .cancel = sd_aio_cancel,
};
516 | ||
517 | static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov, | |
518 | int64_t sector_num, int nb_sectors) | |
519 | { | |
520 | SheepdogAIOCB *acb; | |
521 | ||
522 | acb = qemu_aio_get(&sd_aiocb_info, bs, NULL, NULL); | |
523 | ||
524 | acb->qiov = qiov; | |
525 | ||
526 | acb->sector_num = sector_num; | |
527 | acb->nb_sectors = nb_sectors; | |
528 | ||
529 | acb->aio_done_func = NULL; | |
530 | acb->cancelable = true; | |
531 | acb->finished = NULL; | |
532 | acb->coroutine = qemu_coroutine_self(); | |
533 | acb->ret = 0; | |
534 | acb->nr_pending = 0; | |
535 | return acb; | |
536 | } | |
537 | ||
/*
 * Open a new (non-blocking) connection to the sheepdog server described
 * by s->host_spec, either over a unix socket or TCP.
 *
 * Returns the connected fd, or a negative value with *errp set.
 */
static int connect_to_sdog(BDRVSheepdogState *s, Error **errp)
{
    int fd;

    if (s->is_unix) {
        fd = unix_connect(s->host_spec, errp);
    } else {
        fd = inet_connect(s->host_spec, errp);

        if (fd >= 0) {
            /* Disable Nagle; failure here is only worth a warning. */
            int ret = socket_set_nodelay(fd);
            if (ret < 0) {
                error_report("%s", strerror(errno));
            }
        }
    }

    if (fd >= 0) {
        qemu_set_nonblock(fd);
    }

    return fd;
}
561 | ||
562 | static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data, | |
563 | unsigned int *wlen) | |
564 | { | |
565 | int ret; | |
566 | ||
567 | ret = qemu_co_send(sockfd, hdr, sizeof(*hdr)); | |
568 | if (ret != sizeof(*hdr)) { | |
569 | error_report("failed to send a req, %s", strerror(errno)); | |
570 | return ret; | |
571 | } | |
572 | ||
573 | ret = qemu_co_send(sockfd, data, *wlen); | |
574 | if (ret != *wlen) { | |
575 | error_report("failed to send a req, %s", strerror(errno)); | |
576 | } | |
577 | ||
578 | return ret; | |
579 | } | |
580 | ||
/*
 * fd handler used by do_co_req(): re-enters the coroutine that is
 * waiting for the socket to become ready.
 */
static void restart_co_req(void *opaque)
{
    Coroutine *co = opaque;

    qemu_coroutine_enter(co, NULL);
}
587 | ||
/* Parameters and result slot shared between do_req() and do_co_req(). */
typedef struct SheepdogReqCo {
    int sockfd;
    AioContext *aio_context;
    SheepdogReq *hdr;           /* request header; overwritten with the reply */
    void *data;                 /* payload buffer for both send and receive */
    unsigned int *wlen;         /* bytes to send */
    unsigned int *rlen;         /* in: buffer size, out: bytes received */
    int ret;                    /* 0 on success, negative errno on failure */
    bool finished;              /* set when the coroutine has run to the end */
} SheepdogReqCo;
598 | ||
/*
 * Coroutine body of a synchronous request/response exchange: send the
 * request, then read the reply header and up to *rlen payload bytes
 * back into srco->data.  Socket readiness is waited for by installing
 * restart_co_req as the write/read fd handler around each phase.
 */
static coroutine_fn void do_co_req(void *opaque)
{
    int ret;
    Coroutine *co;
    SheepdogReqCo *srco = opaque;
    int sockfd = srco->sockfd;
    SheepdogReq *hdr = srco->hdr;
    void *data = srco->data;
    unsigned int *wlen = srco->wlen;
    unsigned int *rlen = srco->rlen;

    co = qemu_coroutine_self();
    /* Wait for the socket to become writable while sending. */
    aio_set_fd_handler(srco->aio_context, sockfd, NULL, restart_co_req, co);

    ret = send_co_req(sockfd, hdr, data, wlen);
    if (ret < 0) {
        goto out;
    }

    /* Switch to waiting for readability for the response. */
    aio_set_fd_handler(srco->aio_context, sockfd, restart_co_req, NULL, co);

    ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
    if (ret != sizeof(*hdr)) {
        error_report("failed to get a rsp, %s", strerror(errno));
        ret = -errno;
        goto out;
    }

    /* Never read more than the caller's buffer can hold. */
    if (*rlen > hdr->data_length) {
        *rlen = hdr->data_length;
    }

    if (*rlen) {
        ret = qemu_co_recv(sockfd, data, *rlen);
        if (ret != *rlen) {
            error_report("failed to get the data, %s", strerror(errno));
            ret = -errno;
            goto out;
        }
    }
    ret = 0;
out:
    /* there is at most one request for this sockfd, so it is safe to
     * set each handler to NULL. */
    aio_set_fd_handler(srco->aio_context, sockfd, NULL, NULL, NULL);

    srco->ret = ret;
    srco->finished = true;
}
648 | ||
649 | static int do_req(int sockfd, AioContext *aio_context, SheepdogReq *hdr, | |
650 | void *data, unsigned int *wlen, unsigned int *rlen) | |
651 | { | |
652 | Coroutine *co; | |
653 | SheepdogReqCo srco = { | |
654 | .sockfd = sockfd, | |
655 | .aio_context = aio_context, | |
656 | .hdr = hdr, | |
657 | .data = data, | |
658 | .wlen = wlen, | |
659 | .rlen = rlen, | |
660 | .ret = 0, | |
661 | .finished = false, | |
662 | }; | |
663 | ||
664 | if (qemu_in_coroutine()) { | |
665 | do_co_req(&srco); | |
666 | } else { | |
667 | co = qemu_coroutine_create(do_co_req); | |
668 | qemu_coroutine_enter(co, &srco); | |
669 | while (!srco.finished) { | |
670 | aio_poll(aio_context, true); | |
671 | } | |
672 | } | |
673 | ||
674 | return srco.ret; | |
675 | } | |
676 | ||
/* Forward declarations for the request submission/recovery path. */
static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
                                         struct iovec *iov, int niov,
                                         enum AIOCBState aiocb_type);
static void coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req);
static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag);
static int get_sheep_fd(BDRVSheepdogState *s, Error **errp);
static void co_write_request(void *opaque);
684 | ||
685 | static AIOReq *find_pending_req(BDRVSheepdogState *s, uint64_t oid) | |
686 | { | |
687 | AIOReq *aio_req; | |
688 | ||
689 | QLIST_FOREACH(aio_req, &s->pending_aio_head, aio_siblings) { | |
690 | if (aio_req->oid == oid) { | |
691 | return aio_req; | |
692 | } | |
693 | } | |
694 | ||
695 | return NULL; | |
696 | } | |
697 | ||
698 | /* | |
699 | * This function searchs pending requests to the object `oid', and | |
700 | * sends them. | |
701 | */ | |
702 | static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid) | |
703 | { | |
704 | AIOReq *aio_req; | |
705 | SheepdogAIOCB *acb; | |
706 | ||
707 | while ((aio_req = find_pending_req(s, oid)) != NULL) { | |
708 | acb = aio_req->aiocb; | |
709 | /* move aio_req from pending list to inflight one */ | |
710 | QLIST_REMOVE(aio_req, aio_siblings); | |
711 | QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings); | |
712 | add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov, | |
713 | acb->aiocb_type); | |
714 | } | |
715 | } | |
716 | ||
/*
 * Tear down the broken connection, reconnect (retrying once a second),
 * and resend every request that was in flight when the link died.
 */
static coroutine_fn void reconnect_to_sdog(void *opaque)
{
    BDRVSheepdogState *s = opaque;
    AIOReq *aio_req, *next;

    /* Drop the fd handler before closing the dead socket. */
    aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
    close(s->fd);
    s->fd = -1;

    /* Wait for outstanding write requests to be completed. */
    while (s->co_send != NULL) {
        co_write_request(opaque);
    }

    /* Try to reconnect the sheepdog server every one second. */
    while (s->fd < 0) {
        Error *local_err = NULL;
        s->fd = get_sheep_fd(s, &local_err);
        if (s->fd < 0) {
            DPRINTF("Wait for connection to be established\n");
            error_report("%s", error_get_pretty(local_err));
            error_free(local_err);
            co_aio_sleep_ns(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME,
                            1000000000ULL);
        }
    };

    /*
     * Now we have to resend all the request in the inflight queue. However,
     * resend_aioreq() can yield and newly created requests can be added to the
     * inflight queue before the coroutine is resumed. To avoid mixing them, we
     * have to move all the inflight requests to the failed queue before
     * resend_aioreq() is called.
     */
    QLIST_FOREACH_SAFE(aio_req, &s->inflight_aio_head, aio_siblings, next) {
        QLIST_REMOVE(aio_req, aio_siblings);
        QLIST_INSERT_HEAD(&s->failed_aio_head, aio_req, aio_siblings);
    }

    /* Resend all the failed aio requests. */
    while (!QLIST_EMPTY(&s->failed_aio_head)) {
        aio_req = QLIST_FIRST(&s->failed_aio_head);
        QLIST_REMOVE(aio_req, aio_siblings);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        resend_aioreq(s, aio_req);
    }
}
764 | ||
/*
 * Receive responses of the I/O requests.
 *
 * This function is registered as a fd handler, and called from the
 * main loop when s->fd is ready for reading responses.  It reads one
 * response header, matches it to the in-flight request by id, performs
 * the per-operation bookkeeping, and completes the owning AIOCB when
 * its last request finishes.  On any receive error it falls through to
 * reconnect_to_sdog().
 */
static void coroutine_fn aio_read_response(void *opaque)
{
    SheepdogObjRsp rsp;
    BDRVSheepdogState *s = opaque;
    int fd = s->fd;
    int ret;
    AIOReq *aio_req = NULL;
    SheepdogAIOCB *acb;
    uint64_t idx;

    /* read a header */
    ret = qemu_co_recv(fd, &rsp, sizeof(rsp));
    if (ret != sizeof(rsp)) {
        error_report("failed to get the header, %s", strerror(errno));
        goto err;
    }

    /* find the right aio_req from the inflight aio list */
    QLIST_FOREACH(aio_req, &s->inflight_aio_head, aio_siblings) {
        if (aio_req->id == rsp.id) {
            break;
        }
    }
    if (!aio_req) {
        error_report("cannot find aio_req %x", rsp.id);
        goto err;
    }

    acb = aio_req->aiocb;

    switch (acb->aiocb_type) {
    case AIOCB_WRITE_UDATA:
        /* this coroutine context is no longer suitable for co_recv
         * because we may send data to update vdi objects */
        s->co_recv = NULL;
        if (!is_data_obj(aio_req->oid)) {
            break;
        }
        idx = data_oid_to_idx(aio_req->oid);

        if (aio_req->create) {
            /*
             * If the object is newly created one, we need to update
             * the vdi object (metadata object).  min_dirty_data_idx
             * and max_dirty_data_idx are changed to include updated
             * index between them.
             */
            if (rsp.result == SD_RES_SUCCESS) {
                s->inode.data_vdi_id[idx] = s->inode.vdi_id;
                s->max_dirty_data_idx = MAX(idx, s->max_dirty_data_idx);
                s->min_dirty_data_idx = MIN(idx, s->min_dirty_data_idx);
            }
            /*
             * Some requests may be blocked because simultaneous
             * create requests are not allowed, so we search the
             * pending requests here.
             */
            send_pending_req(s, aio_req->oid);
        }
        break;
    case AIOCB_READ_UDATA:
        /* Pull the payload straight into the caller's iovec. */
        ret = qemu_co_recvv(fd, acb->qiov->iov, acb->qiov->niov,
                            aio_req->iov_offset, rsp.data_length);
        if (ret != rsp.data_length) {
            error_report("failed to get the data, %s", strerror(errno));
            goto err;
        }
        break;
    case AIOCB_FLUSH_CACHE:
        if (rsp.result == SD_RES_INVALID_PARMS) {
            DPRINTF("disable cache since the server doesn't support it\n");
            s->cache_flags = SD_FLAG_CMD_DIRECT;
            /* treat the unsupported flush as a successful no-op */
            rsp.result = SD_RES_SUCCESS;
        }
        break;
    case AIOCB_DISCARD_OBJ:
        switch (rsp.result) {
        case SD_RES_INVALID_PARMS:
            error_report("sheep(%s) doesn't support discard command",
                         s->host_spec);
            rsp.result = SD_RES_SUCCESS;
            s->discard_supported = false;
            break;
        case SD_RES_SUCCESS:
            /* the object is gone; forget its owner in the cached inode */
            idx = data_oid_to_idx(aio_req->oid);
            s->inode.data_vdi_id[idx] = 0;
            break;
        default:
            break;
        }
    }

    switch (rsp.result) {
    case SD_RES_SUCCESS:
        break;
    case SD_RES_READONLY:
        /* The vdi was snapshotted under us: reload the inode and retry
         * the request against the new working vdi. */
        if (s->inode.vdi_id == oid_to_vid(aio_req->oid)) {
            ret = reload_inode(s, 0, "");
            if (ret < 0) {
                goto err;
            }
        }
        if (is_data_obj(aio_req->oid)) {
            aio_req->oid = vid_to_data_oid(s->inode.vdi_id,
                                           data_oid_to_idx(aio_req->oid));
        } else {
            aio_req->oid = vid_to_vdi_oid(s->inode.vdi_id);
        }
        resend_aioreq(s, aio_req);
        goto out;
    default:
        acb->ret = -EIO;
        error_report("%s", sd_strerror(rsp.result));
        break;
    }

    free_aio_req(s, aio_req);
    if (!acb->nr_pending) {
        /*
         * We've finished all requests which belong to the AIOCB, so
         * we can switch back to sd_co_readv/writev now.
         */
        acb->aio_done_func(acb);
    }
out:
    s->co_recv = NULL;
    return;
err:
    s->co_recv = NULL;
    reconnect_to_sdog(opaque);
}
902 | ||
903 | static void co_read_response(void *opaque) | |
904 | { | |
905 | BDRVSheepdogState *s = opaque; | |
906 | ||
907 | if (!s->co_recv) { | |
908 | s->co_recv = qemu_coroutine_create(aio_read_response); | |
909 | } | |
910 | ||
911 | qemu_coroutine_enter(s->co_recv, opaque); | |
912 | } | |
913 | ||
/*
 * fd write handler: resume the coroutine that is currently sending a
 * request (s->co_send) so it can continue writing.
 */
static void co_write_request(void *opaque)
{
    BDRVSheepdogState *s = opaque;

    qemu_coroutine_enter(s->co_send, NULL);
}
920 | ||
/*
 * Return a socket descriptor to read/write objects.
 *
 * We cannot use this descriptor for other operations because
 * the block driver may be waiting for a response from the server.
 */
static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
{
    int fd;

    fd = connect_to_sdog(s, errp);
    if (fd < 0) {
        return fd;
    }

    /* All responses arriving on this socket go through co_read_response. */
    aio_set_fd_handler(s->aio_context, fd, co_read_response, NULL, s);
    return fd;
}
939 | ||
/*
 * Parse a sheepdog URI of one of the forms
 *
 *   sheepdog[+tcp]://[host:port]/vdiname[#snapid|#tag]
 *   sheepdog+unix:///vdiname?socket=path[#snapid|#tag]
 *
 * filling in s->is_unix and s->host_spec, the vdi name, and the
 * snapshot id or tag.  Returns 0 on success, -EINVAL on malformed input.
 */
static int sd_parse_uri(BDRVSheepdogState *s, const char *filename,
                        char *vdi, uint32_t *snapid, char *tag)
{
    URI *uri;
    QueryParams *qp = NULL;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "sheepdog")) {
        s->is_unix = false;
    } else if (!strcmp(uri->scheme, "sheepdog+tcp")) {
        s->is_unix = false;
    } else if (!strcmp(uri->scheme, "sheepdog+unix")) {
        s->is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    /* the vdi name is the URI path with the leading '/' stripped */
    if (uri->path == NULL || !strcmp(uri->path, "/")) {
        ret = -EINVAL;
        goto out;
    }
    pstrcpy(vdi, SD_MAX_VDI_LEN, uri->path + 1);

    /* a query string ("?socket=...") is required for unix transport and
     * forbidden otherwise */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (s->is_unix && !qp->n) || (!s->is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (s->is_unix) {
        /* sheepdog+unix:///vdiname?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        s->host_spec = g_strdup(qp->p[0].value);
    } else {
        /* sheepdog[+tcp]://[host:port]/vdiname */
        s->host_spec = g_strdup_printf("%s:%d", uri->server ?: SD_DEFAULT_ADDR,
                                       uri->port ?: SD_DEFAULT_PORT);
    }

    /* snapshot tag: a numeric fragment is a snapshot id, anything else
     * (strtoul yields 0) is treated as a tag name */
    if (uri->fragment) {
        *snapid = strtoul(uri->fragment, NULL, 10);
        if (*snapid == 0) {
            pstrcpy(tag, SD_MAX_VDI_TAG_LEN, uri->fragment);
        }
    } else {
        *snapid = CURRENT_VDI_ID; /* search current vdi */
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
1006 | ||
1007 | /* | |
1008 | * Parse a filename (old syntax) | |
1009 | * | |
1010 | * filename must be one of the following formats: | |
1011 | * 1. [vdiname] | |
1012 | * 2. [vdiname]:[snapid] | |
1013 | * 3. [vdiname]:[tag] | |
1014 | * 4. [hostname]:[port]:[vdiname] | |
1015 | * 5. [hostname]:[port]:[vdiname]:[snapid] | |
1016 | * 6. [hostname]:[port]:[vdiname]:[tag] | |
1017 | * | |
1018 | * You can boot from the snapshot images by specifying `snapid` or | |
1019 | * `tag'. | |
1020 | * | |
1021 | * You can run VMs outside the Sheepdog cluster by specifying | |
1022 | * `hostname' and `port' (experimental). | |
1023 | */ | |
1024 | static int parse_vdiname(BDRVSheepdogState *s, const char *filename, | |
1025 | char *vdi, uint32_t *snapid, char *tag) | |
1026 | { | |
1027 | char *p, *q, *uri; | |
1028 | const char *host_spec, *vdi_spec; | |
1029 | int nr_sep, ret; | |
1030 | ||
1031 | strstart(filename, "sheepdog:", (const char **)&filename); | |
1032 | p = q = g_strdup(filename); | |
1033 | ||
1034 | /* count the number of separators */ | |
1035 | nr_sep = 0; | |
1036 | while (*p) { | |
1037 | if (*p == ':') { | |
1038 | nr_sep++; | |
1039 | } | |
1040 | p++; | |
1041 | } | |
1042 | p = q; | |
1043 | ||
1044 | /* use the first two tokens as host_spec. */ | |
1045 | if (nr_sep >= 2) { | |
1046 | host_spec = p; | |
1047 | p = strchr(p, ':'); | |
1048 | p++; | |
1049 | p = strchr(p, ':'); | |
1050 | *p++ = '\0'; | |
1051 | } else { | |
1052 | host_spec = ""; | |
1053 | } | |
1054 | ||
1055 | vdi_spec = p; | |
1056 | ||
1057 | p = strchr(vdi_spec, ':'); | |
1058 | if (p) { | |
1059 | *p++ = '#'; | |
1060 | } | |
1061 | ||
1062 | uri = g_strdup_printf("sheepdog://%s/%s", host_spec, vdi_spec); | |
1063 | ||
1064 | ret = sd_parse_uri(s, uri, vdi, snapid, tag); | |
1065 | ||
1066 | g_free(q); | |
1067 | g_free(uri); | |
1068 | ||
1069 | return ret; | |
1070 | } | |
1071 | ||
1072 | static int find_vdi_name(BDRVSheepdogState *s, const char *filename, | |
1073 | uint32_t snapid, const char *tag, uint32_t *vid, | |
1074 | bool lock, Error **errp) | |
1075 | { | |
1076 | int ret, fd; | |
1077 | SheepdogVdiReq hdr; | |
1078 | SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; | |
1079 | unsigned int wlen, rlen = 0; | |
1080 | char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN]; | |
1081 | ||
1082 | fd = connect_to_sdog(s, errp); | |
1083 | if (fd < 0) { | |
1084 | return fd; | |
1085 | } | |
1086 | ||
1087 | /* This pair of strncpy calls ensures that the buffer is zero-filled, | |
1088 | * which is desirable since we'll soon be sending those bytes, and | |
1089 | * don't want the send_req to read uninitialized data. | |
1090 | */ | |
1091 | strncpy(buf, filename, SD_MAX_VDI_LEN); | |
1092 | strncpy(buf + SD_MAX_VDI_LEN, tag, SD_MAX_VDI_TAG_LEN); | |
1093 | ||
1094 | memset(&hdr, 0, sizeof(hdr)); | |
1095 | if (lock) { | |
1096 | hdr.opcode = SD_OP_LOCK_VDI; | |
1097 | hdr.type = LOCK_TYPE_NORMAL; | |
1098 | } else { | |
1099 | hdr.opcode = SD_OP_GET_VDI_INFO; | |
1100 | } | |
1101 | wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN; | |
1102 | hdr.proto_ver = SD_PROTO_VER; | |
1103 | hdr.data_length = wlen; | |
1104 | hdr.snapid = snapid; | |
1105 | hdr.flags = SD_FLAG_CMD_WRITE; | |
1106 | ||
1107 | ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen); | |
1108 | if (ret) { | |
1109 | error_setg_errno(errp, -ret, "cannot get vdi info"); | |
1110 | goto out; | |
1111 | } | |
1112 | ||
1113 | if (rsp->result != SD_RES_SUCCESS) { | |
1114 | error_setg(errp, "cannot get vdi info, %s, %s %" PRIu32 " %s", | |
1115 | sd_strerror(rsp->result), filename, snapid, tag); | |
1116 | if (rsp->result == SD_RES_NO_VDI) { | |
1117 | ret = -ENOENT; | |
1118 | } else if (rsp->result == SD_RES_VDI_LOCKED) { | |
1119 | ret = -EBUSY; | |
1120 | } else { | |
1121 | ret = -EIO; | |
1122 | } | |
1123 | goto out; | |
1124 | } | |
1125 | *vid = rsp->vdi_id; | |
1126 | ||
1127 | ret = 0; | |
1128 | out: | |
1129 | closesocket(fd); | |
1130 | return ret; | |
1131 | } | |
1132 | ||
/*
 * Send one asynchronous I/O request (described by @aio_req) to the
 * server over the persistent sheep fd, taking the write payload from
 * @iov/@niov.
 *
 * Runs in coroutine context: s->lock serializes senders on the shared
 * socket; the coroutine registers itself as s->co_send so that
 * co_write_request() can resume it when the socket becomes writable,
 * and the read-only fd handler is restored before returning.  Send
 * failures are only logged here; recovery is driven by the receive
 * side.
 */
static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
                                         struct iovec *iov, int niov,
                                         enum AIOCBState aiocb_type)
{
    int nr_copies = s->inode.nr_copies;
    SheepdogObjReq hdr;
    unsigned int wlen = 0;
    int ret;
    uint64_t oid = aio_req->oid;
    unsigned int datalen = aio_req->data_len;
    uint64_t offset = aio_req->offset;
    uint8_t flags = aio_req->flags;
    uint64_t old_oid = aio_req->base_oid;
    bool create = aio_req->create;

    /* nr_copies == 0 means the inode was never initialized */
    if (!nr_copies) {
        error_report("bug");
    }

    memset(&hdr, 0, sizeof(hdr));

    switch (aiocb_type) {
    case AIOCB_FLUSH_CACHE:
        hdr.opcode = SD_OP_FLUSH_VDI;
        break;
    case AIOCB_READ_UDATA:
        hdr.opcode = SD_OP_READ_OBJ;
        hdr.flags = flags;
        break;
    case AIOCB_WRITE_UDATA:
        if (create) {
            hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
        } else {
            hdr.opcode = SD_OP_WRITE_OBJ;
        }
        wlen = datalen; /* only writes carry a payload after the header */
        hdr.flags = SD_FLAG_CMD_WRITE | flags;
        break;
    case AIOCB_DISCARD_OBJ:
        hdr.opcode = SD_OP_DISCARD_OBJ;
        break;
    }

    if (s->cache_flags) {
        hdr.flags |= s->cache_flags;
    }

    hdr.oid = oid;
    hdr.cow_oid = old_oid; /* base object for copy-on-write requests */
    hdr.copies = s->inode.nr_copies;

    hdr.data_length = datalen;
    hdr.offset = offset;

    hdr.id = aio_req->id;

    qemu_co_mutex_lock(&s->lock);
    s->co_send = qemu_coroutine_self();
    /* also watch for writability so a partial send can be resumed */
    aio_set_fd_handler(s->aio_context, s->fd,
                       co_read_response, co_write_request, s);
    /* cork so header and payload leave in as few segments as possible */
    socket_set_cork(s->fd, 1);

    /* send a header */
    ret = qemu_co_send(s->fd, &hdr, sizeof(hdr));
    if (ret != sizeof(hdr)) {
        error_report("failed to send a req, %s", strerror(errno));
        goto out;
    }

    if (wlen) {
        ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
        if (ret != wlen) {
            error_report("failed to send a data, %s", strerror(errno));
        }
    }
out:
    /* uncork, drop the write handler and release the socket */
    socket_set_cork(s->fd, 0);
    aio_set_fd_handler(s->aio_context, s->fd, co_read_response, NULL, s);
    s->co_send = NULL;
    qemu_co_mutex_unlock(&s->lock);
}
1214 | ||
1215 | static int read_write_object(int fd, AioContext *aio_context, char *buf, | |
1216 | uint64_t oid, uint8_t copies, | |
1217 | unsigned int datalen, uint64_t offset, | |
1218 | bool write, bool create, uint32_t cache_flags) | |
1219 | { | |
1220 | SheepdogObjReq hdr; | |
1221 | SheepdogObjRsp *rsp = (SheepdogObjRsp *)&hdr; | |
1222 | unsigned int wlen, rlen; | |
1223 | int ret; | |
1224 | ||
1225 | memset(&hdr, 0, sizeof(hdr)); | |
1226 | ||
1227 | if (write) { | |
1228 | wlen = datalen; | |
1229 | rlen = 0; | |
1230 | hdr.flags = SD_FLAG_CMD_WRITE; | |
1231 | if (create) { | |
1232 | hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ; | |
1233 | } else { | |
1234 | hdr.opcode = SD_OP_WRITE_OBJ; | |
1235 | } | |
1236 | } else { | |
1237 | wlen = 0; | |
1238 | rlen = datalen; | |
1239 | hdr.opcode = SD_OP_READ_OBJ; | |
1240 | } | |
1241 | ||
1242 | hdr.flags |= cache_flags; | |
1243 | ||
1244 | hdr.oid = oid; | |
1245 | hdr.data_length = datalen; | |
1246 | hdr.offset = offset; | |
1247 | hdr.copies = copies; | |
1248 | ||
1249 | ret = do_req(fd, aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen); | |
1250 | if (ret) { | |
1251 | error_report("failed to send a request to the sheep"); | |
1252 | return ret; | |
1253 | } | |
1254 | ||
1255 | switch (rsp->result) { | |
1256 | case SD_RES_SUCCESS: | |
1257 | return 0; | |
1258 | default: | |
1259 | error_report("%s", sd_strerror(rsp->result)); | |
1260 | return -EIO; | |
1261 | } | |
1262 | } | |
1263 | ||
1264 | static int read_object(int fd, AioContext *aio_context, char *buf, | |
1265 | uint64_t oid, uint8_t copies, | |
1266 | unsigned int datalen, uint64_t offset, | |
1267 | uint32_t cache_flags) | |
1268 | { | |
1269 | return read_write_object(fd, aio_context, buf, oid, copies, | |
1270 | datalen, offset, false, | |
1271 | false, cache_flags); | |
1272 | } | |
1273 | ||
1274 | static int write_object(int fd, AioContext *aio_context, char *buf, | |
1275 | uint64_t oid, uint8_t copies, | |
1276 | unsigned int datalen, uint64_t offset, bool create, | |
1277 | uint32_t cache_flags) | |
1278 | { | |
1279 | return read_write_object(fd, aio_context, buf, oid, copies, | |
1280 | datalen, offset, true, | |
1281 | create, cache_flags); | |
1282 | } | |
1283 | ||
1284 | /* update inode with the latest state */ | |
1285 | static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag) | |
1286 | { | |
1287 | Error *local_err = NULL; | |
1288 | SheepdogInode *inode; | |
1289 | int ret = 0, fd; | |
1290 | uint32_t vid = 0; | |
1291 | ||
1292 | fd = connect_to_sdog(s, &local_err); | |
1293 | if (fd < 0) { | |
1294 | error_report("%s", error_get_pretty(local_err));; | |
1295 | error_free(local_err); | |
1296 | return -EIO; | |
1297 | } | |
1298 | ||
1299 | inode = g_malloc(SD_INODE_HEADER_SIZE); | |
1300 | ||
1301 | ret = find_vdi_name(s, s->name, snapid, tag, &vid, false, &local_err); | |
1302 | if (ret) { | |
1303 | error_report("%s", error_get_pretty(local_err));; | |
1304 | error_free(local_err); | |
1305 | goto out; | |
1306 | } | |
1307 | ||
1308 | ret = read_object(fd, s->aio_context, (char *)inode, vid_to_vdi_oid(vid), | |
1309 | s->inode.nr_copies, SD_INODE_HEADER_SIZE, 0, | |
1310 | s->cache_flags); | |
1311 | if (ret < 0) { | |
1312 | goto out; | |
1313 | } | |
1314 | ||
1315 | if (inode->vdi_id != s->inode.vdi_id) { | |
1316 | memcpy(&s->inode, inode, SD_INODE_HEADER_SIZE); | |
1317 | } | |
1318 | ||
1319 | out: | |
1320 | g_free(inode); | |
1321 | closesocket(fd); | |
1322 | ||
1323 | return ret; | |
1324 | } | |
1325 | ||
1326 | /* Return true if the specified request is linked to the pending list. */ | |
1327 | static bool check_simultaneous_create(BDRVSheepdogState *s, AIOReq *aio_req) | |
1328 | { | |
1329 | AIOReq *areq; | |
1330 | QLIST_FOREACH(areq, &s->inflight_aio_head, aio_siblings) { | |
1331 | if (areq != aio_req && areq->oid == aio_req->oid) { | |
1332 | /* | |
1333 | * Sheepdog cannot handle simultaneous create requests to the same | |
1334 | * object, so we cannot send the request until the previous request | |
1335 | * finishes. | |
1336 | */ | |
1337 | DPRINTF("simultaneous create to %" PRIx64 "\n", aio_req->oid); | |
1338 | aio_req->flags = 0; | |
1339 | aio_req->base_oid = 0; | |
1340 | aio_req->create = false; | |
1341 | QLIST_REMOVE(aio_req, aio_siblings); | |
1342 | QLIST_INSERT_HEAD(&s->pending_aio_head, aio_req, aio_siblings); | |
1343 | return true; | |
1344 | } | |
1345 | } | |
1346 | ||
1347 | return false; | |
1348 | } | |
1349 | ||
/*
 * Re-issue @aio_req (e.g. after a reconnect or inode reload),
 * recomputing from the current inode whether the request must create
 * its object or copy-on-write from the base VDI's object.
 */
static void coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req)
{
    SheepdogAIOCB *acb = aio_req->aiocb;

    aio_req->create = false;

    /* check whether this request becomes a CoW one */
    if (acb->aiocb_type == AIOCB_WRITE_UDATA && is_data_obj(aio_req->oid)) {
        int idx = data_oid_to_idx(aio_req->oid);

        if (is_data_obj_writable(&s->inode, idx)) {
            /* the object already belongs to this VDI: plain rewrite */
            goto out;
        }

        if (check_simultaneous_create(s, aio_req)) {
            /* parked on the pending list; it will be sent later */
            return;
        }

        if (s->inode.data_vdi_id[idx]) {
            /* copy-on-write from the base VDI's object */
            aio_req->base_oid = vid_to_data_oid(s->inode.data_vdi_id[idx], idx);
            aio_req->flags |= SD_FLAG_CMD_COW;
        }
        aio_req->create = true;
    }
out:
    if (is_data_obj(aio_req->oid)) {
        add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
                        acb->aiocb_type);
    } else {
        /* vdi object update: the payload is the cached inode itself */
        struct iovec iov;
        iov.iov_base = &s->inode;
        iov.iov_len = sizeof(s->inode);
        add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
    }
}
1385 | ||
1386 | static void sd_detach_aio_context(BlockDriverState *bs) | |
1387 | { | |
1388 | BDRVSheepdogState *s = bs->opaque; | |
1389 | ||
1390 | aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL); | |
1391 | } | |
1392 | ||
1393 | static void sd_attach_aio_context(BlockDriverState *bs, | |
1394 | AioContext *new_context) | |
1395 | { | |
1396 | BDRVSheepdogState *s = bs->opaque; | |
1397 | ||
1398 | s->aio_context = new_context; | |
1399 | aio_set_fd_handler(new_context, s->fd, co_read_response, NULL, s); | |
1400 | } | |
1401 | ||
/* TODO Convert to fine grained options */
/* Runtime options accepted by sd_open(); currently only the raw URL
 * (or legacy "host:port:vdiname" string) is supported. */
static QemuOptsList runtime_opts = {
    .name = "sheepdog",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the sheepdog image",
        },
        { /* end of list */ }
    },
};
1415 | ||
1416 | static int sd_open(BlockDriverState *bs, QDict *options, int flags, | |
1417 | Error **errp) | |
1418 | { | |
1419 | int ret, fd; | |
1420 | uint32_t vid = 0; | |
1421 | BDRVSheepdogState *s = bs->opaque; | |
1422 | char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN]; | |
1423 | uint32_t snapid; | |
1424 | char *buf = NULL; | |
1425 | QemuOpts *opts; | |
1426 | Error *local_err = NULL; | |
1427 | const char *filename; | |
1428 | ||
1429 | s->bs = bs; | |
1430 | s->aio_context = bdrv_get_aio_context(bs); | |
1431 | ||
1432 | opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); | |
1433 | qemu_opts_absorb_qdict(opts, options, &local_err); | |
1434 | if (local_err) { | |
1435 | error_propagate(errp, local_err); | |
1436 | ret = -EINVAL; | |
1437 | goto out; | |
1438 | } | |
1439 | ||
1440 | filename = qemu_opt_get(opts, "filename"); | |
1441 | ||
1442 | QLIST_INIT(&s->inflight_aio_head); | |
1443 | QLIST_INIT(&s->pending_aio_head); | |
1444 | QLIST_INIT(&s->failed_aio_head); | |
1445 | s->fd = -1; | |
1446 | ||
1447 | memset(vdi, 0, sizeof(vdi)); | |
1448 | memset(tag, 0, sizeof(tag)); | |
1449 | ||
1450 | if (strstr(filename, "://")) { | |
1451 | ret = sd_parse_uri(s, filename, vdi, &snapid, tag); | |
1452 | } else { | |
1453 | ret = parse_vdiname(s, filename, vdi, &snapid, tag); | |
1454 | } | |
1455 | if (ret < 0) { | |
1456 | error_setg(errp, "Can't parse filename"); | |
1457 | goto out; | |
1458 | } | |
1459 | s->fd = get_sheep_fd(s, errp); | |
1460 | if (s->fd < 0) { | |
1461 | ret = s->fd; | |
1462 | goto out; | |
1463 | } | |
1464 | ||
1465 | ret = find_vdi_name(s, vdi, snapid, tag, &vid, true, errp); | |
1466 | if (ret) { | |
1467 | goto out; | |
1468 | } | |
1469 | ||
1470 | /* | |
1471 | * QEMU block layer emulates writethrough cache as 'writeback + flush', so | |
1472 | * we always set SD_FLAG_CMD_CACHE (writeback cache) as default. | |
1473 | */ | |
1474 | s->cache_flags = SD_FLAG_CMD_CACHE; | |
1475 | if (flags & BDRV_O_NOCACHE) { | |
1476 | s->cache_flags = SD_FLAG_CMD_DIRECT; | |
1477 | } | |
1478 | s->discard_supported = true; | |
1479 | ||
1480 | if (snapid || tag[0] != '\0') { | |
1481 | DPRINTF("%" PRIx32 " snapshot inode was open.\n", vid); | |
1482 | s->is_snapshot = true; | |
1483 | } | |
1484 | ||
1485 | fd = connect_to_sdog(s, errp); | |
1486 | if (fd < 0) { | |
1487 | ret = fd; | |
1488 | goto out; | |
1489 | } | |
1490 | ||
1491 | buf = g_malloc(SD_INODE_SIZE); | |
1492 | ret = read_object(fd, s->aio_context, buf, vid_to_vdi_oid(vid), | |
1493 | 0, SD_INODE_SIZE, 0, s->cache_flags); | |
1494 | ||
1495 | closesocket(fd); | |
1496 | ||
1497 | if (ret) { | |
1498 | error_setg(errp, "Can't read snapshot inode"); | |
1499 | goto out; | |
1500 | } | |
1501 | ||
1502 | memcpy(&s->inode, buf, sizeof(s->inode)); | |
1503 | s->min_dirty_data_idx = UINT32_MAX; | |
1504 | s->max_dirty_data_idx = 0; | |
1505 | ||
1506 | bs->total_sectors = s->inode.vdi_size / BDRV_SECTOR_SIZE; | |
1507 | pstrcpy(s->name, sizeof(s->name), vdi); | |
1508 | qemu_co_mutex_init(&s->lock); | |
1509 | qemu_opts_del(opts); | |
1510 | g_free(buf); | |
1511 | return 0; | |
1512 | out: | |
1513 | aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL); | |
1514 | if (s->fd >= 0) { | |
1515 | closesocket(s->fd); | |
1516 | } | |
1517 | qemu_opts_del(opts); | |
1518 | g_free(buf); | |
1519 | return ret; | |
1520 | } | |
1521 | ||
1522 | static int do_sd_create(BDRVSheepdogState *s, uint32_t *vdi_id, int snapshot, | |
1523 | Error **errp) | |
1524 | { | |
1525 | SheepdogVdiReq hdr; | |
1526 | SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; | |
1527 | int fd, ret; | |
1528 | unsigned int wlen, rlen = 0; | |
1529 | char buf[SD_MAX_VDI_LEN]; | |
1530 | ||
1531 | fd = connect_to_sdog(s, errp); | |
1532 | if (fd < 0) { | |
1533 | return fd; | |
1534 | } | |
1535 | ||
1536 | /* FIXME: would it be better to fail (e.g., return -EIO) when filename | |
1537 | * does not fit in buf? For now, just truncate and avoid buffer overrun. | |
1538 | */ | |
1539 | memset(buf, 0, sizeof(buf)); | |
1540 | pstrcpy(buf, sizeof(buf), s->name); | |
1541 | ||
1542 | memset(&hdr, 0, sizeof(hdr)); | |
1543 | hdr.opcode = SD_OP_NEW_VDI; | |
1544 | hdr.base_vdi_id = s->inode.vdi_id; | |
1545 | ||
1546 | wlen = SD_MAX_VDI_LEN; | |
1547 | ||
1548 | hdr.flags = SD_FLAG_CMD_WRITE; | |
1549 | hdr.snapid = snapshot; | |
1550 | ||
1551 | hdr.data_length = wlen; | |
1552 | hdr.vdi_size = s->inode.vdi_size; | |
1553 | hdr.copy_policy = s->inode.copy_policy; | |
1554 | hdr.copies = s->inode.nr_copies; | |
1555 | ||
1556 | ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen); | |
1557 | ||
1558 | closesocket(fd); | |
1559 | ||
1560 | if (ret) { | |
1561 | error_setg_errno(errp, -ret, "create failed"); | |
1562 | return ret; | |
1563 | } | |
1564 | ||
1565 | if (rsp->result != SD_RES_SUCCESS) { | |
1566 | error_setg(errp, "%s, %s", sd_strerror(rsp->result), s->inode.name); | |
1567 | return -EIO; | |
1568 | } | |
1569 | ||
1570 | if (vdi_id) { | |
1571 | *vdi_id = rsp->vdi_id; | |
1572 | } | |
1573 | ||
1574 | return 0; | |
1575 | } | |
1576 | ||
1577 | static int sd_prealloc(const char *filename, Error **errp) | |
1578 | { | |
1579 | BlockDriverState *bs = NULL; | |
1580 | uint32_t idx, max_idx; | |
1581 | int64_t vdi_size; | |
1582 | void *buf = g_malloc0(SD_DATA_OBJ_SIZE); | |
1583 | int ret; | |
1584 | ||
1585 | ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, | |
1586 | NULL, errp); | |
1587 | if (ret < 0) { | |
1588 | goto out_with_err_set; | |
1589 | } | |
1590 | ||
1591 | vdi_size = bdrv_getlength(bs); | |
1592 | if (vdi_size < 0) { | |
1593 | ret = vdi_size; | |
1594 | goto out; | |
1595 | } | |
1596 | max_idx = DIV_ROUND_UP(vdi_size, SD_DATA_OBJ_SIZE); | |
1597 | ||
1598 | for (idx = 0; idx < max_idx; idx++) { | |
1599 | /* | |
1600 | * The created image can be a cloned image, so we need to read | |
1601 | * a data from the source image. | |
1602 | */ | |
1603 | ret = bdrv_pread(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE); | |
1604 | if (ret < 0) { | |
1605 | goto out; | |
1606 | } | |
1607 | ret = bdrv_pwrite(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE); | |
1608 | if (ret < 0) { | |
1609 | goto out; | |
1610 | } | |
1611 | } | |
1612 | ||
1613 | out: | |
1614 | if (ret < 0) { | |
1615 | error_setg_errno(errp, -ret, "Can't pre-allocate"); | |
1616 | } | |
1617 | out_with_err_set: | |
1618 | if (bs) { | |
1619 | bdrv_unref(bs); | |
1620 | } | |
1621 | g_free(buf); | |
1622 | ||
1623 | return ret; | |
1624 | } | |
1625 | ||
1626 | /* | |
1627 | * Sheepdog support two kinds of redundancy, full replication and erasure | |
1628 | * coding. | |
1629 | * | |
1630 | * # create a fully replicated vdi with x copies | |
1631 | * -o redundancy=x (1 <= x <= SD_MAX_COPIES) | |
1632 | * | |
1633 | * # create a erasure coded vdi with x data strips and y parity strips | |
1634 | * -o redundancy=x:y (x must be one of {2,4,8,16} and 1 <= y < SD_EC_MAX_STRIP) | |
1635 | */ | |
1636 | static int parse_redundancy(BDRVSheepdogState *s, const char *opt) | |
1637 | { | |
1638 | struct SheepdogInode *inode = &s->inode; | |
1639 | const char *n1, *n2; | |
1640 | long copy, parity; | |
1641 | char p[10]; | |
1642 | ||
1643 | pstrcpy(p, sizeof(p), opt); | |
1644 | n1 = strtok(p, ":"); | |
1645 | n2 = strtok(NULL, ":"); | |
1646 | ||
1647 | if (!n1) { | |
1648 | return -EINVAL; | |
1649 | } | |
1650 | ||
1651 | copy = strtol(n1, NULL, 10); | |
1652 | if (copy > SD_MAX_COPIES || copy < 1) { | |
1653 | return -EINVAL; | |
1654 | } | |
1655 | if (!n2) { | |
1656 | inode->copy_policy = 0; | |
1657 | inode->nr_copies = copy; | |
1658 | return 0; | |
1659 | } | |
1660 | ||
1661 | if (copy != 2 && copy != 4 && copy != 8 && copy != 16) { | |
1662 | return -EINVAL; | |
1663 | } | |
1664 | ||
1665 | parity = strtol(n2, NULL, 10); | |
1666 | if (parity >= SD_EC_MAX_STRIP || parity < 1) { | |
1667 | return -EINVAL; | |
1668 | } | |
1669 | ||
1670 | /* | |
1671 | * 4 bits for parity and 4 bits for data. | |
1672 | * We have to compress upper data bits because it can't represent 16 | |
1673 | */ | |
1674 | inode->copy_policy = ((copy / 2) << 4) + parity; | |
1675 | inode->nr_copies = copy + parity; | |
1676 | ||
1677 | return 0; | |
1678 | } | |
1679 | ||
/*
 * Create a new sheepdog image.
 *
 * Parses the filename and the creation options (size, backing file,
 * preallocation, redundancy), optionally clones from a snapshot VDI
 * given as backing file, asks the server to create the VDI, and
 * preallocates the data objects when requested.
 *
 * Returns 0 on success, negative errno on failure (with @errp set).
 */
static int sd_create(const char *filename, QemuOpts *opts,
                     Error **errp)
{
    int ret = 0;
    uint32_t vid = 0;
    char *backing_file = NULL;
    char *buf = NULL;
    BDRVSheepdogState *s;
    char tag[SD_MAX_VDI_TAG_LEN];
    uint32_t snapid;
    bool prealloc = false;

    /* a temporary state object; freed again before returning */
    s = g_new0(BDRVSheepdogState, 1);

    memset(tag, 0, sizeof(tag));
    if (strstr(filename, "://")) {
        ret = sd_parse_uri(s, filename, s->name, &snapid, tag);
    } else {
        ret = parse_vdiname(s, filename, s->name, &snapid, tag);
    }
    if (ret < 0) {
        error_setg(errp, "Can't parse filename");
        goto out;
    }

    s->inode.vdi_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    /* buf first holds the preallocation mode... */
    buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!buf || !strcmp(buf, "off")) {
        prealloc = false;
    } else if (!strcmp(buf, "full")) {
        prealloc = true;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'", buf);
        ret = -EINVAL;
        goto out;
    }

    /* ...and is then reused for the redundancy option */
    g_free(buf);
    buf = qemu_opt_get_del(opts, BLOCK_OPT_REDUNDANCY);
    if (buf) {
        ret = parse_redundancy(s, buf);
        if (ret < 0) {
            error_setg(errp, "Invalid redundancy mode: '%s'", buf);
            goto out;
        }
    }

    if (s->inode.vdi_size > SD_MAX_VDI_SIZE) {
        error_setg(errp, "too big image size");
        ret = -EINVAL;
        goto out;
    }

    if (backing_file) {
        BlockDriverState *bs;
        BDRVSheepdogState *base;
        BlockDriver *drv;

        /* Currently, only Sheepdog backing image is supported. */
        drv = bdrv_find_protocol(backing_file, true);
        if (!drv || strcmp(drv->protocol_name, "sheepdog") != 0) {
            error_setg(errp, "backing_file must be a sheepdog image");
            ret = -EINVAL;
            goto out;
        }

        bs = NULL;
        ret = bdrv_open(&bs, backing_file, NULL, NULL, BDRV_O_PROTOCOL, NULL,
                        errp);
        if (ret < 0) {
            goto out;
        }

        base = bs->opaque;

        /* cloning is only possible from a snapshot VDI */
        if (!is_snapshot(&base->inode)) {
            error_setg(errp, "cannot clone from a non snapshot vdi");
            bdrv_unref(bs);
            ret = -EINVAL;
            goto out;
        }
        /* record the base VDI so the server creates a clone */
        s->inode.vdi_id = base->inode.vdi_id;
        bdrv_unref(bs);
    }

    s->aio_context = qemu_get_aio_context();
    ret = do_sd_create(s, &vid, 0, errp);
    if (ret) {
        goto out;
    }

    if (prealloc) {
        ret = sd_prealloc(filename, errp);
    }
out:
    g_free(backing_file);
    g_free(buf);
    g_free(s);
    return ret;
}
1781 | ||
1782 | static void sd_close(BlockDriverState *bs) | |
1783 | { | |
1784 | Error *local_err = NULL; | |
1785 | BDRVSheepdogState *s = bs->opaque; | |
1786 | SheepdogVdiReq hdr; | |
1787 | SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; | |
1788 | unsigned int wlen, rlen = 0; | |
1789 | int fd, ret; | |
1790 | ||
1791 | DPRINTF("%s\n", s->name); | |
1792 | ||
1793 | fd = connect_to_sdog(s, &local_err); | |
1794 | if (fd < 0) { | |
1795 | error_report("%s", error_get_pretty(local_err));; | |
1796 | error_free(local_err); | |
1797 | return; | |
1798 | } | |
1799 | ||
1800 | memset(&hdr, 0, sizeof(hdr)); | |
1801 | ||
1802 | hdr.opcode = SD_OP_RELEASE_VDI; | |
1803 | hdr.type = LOCK_TYPE_NORMAL; | |
1804 | hdr.base_vdi_id = s->inode.vdi_id; | |
1805 | wlen = strlen(s->name) + 1; | |
1806 | hdr.data_length = wlen; | |
1807 | hdr.flags = SD_FLAG_CMD_WRITE; | |
1808 | ||
1809 | ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, | |
1810 | s->name, &wlen, &rlen); | |
1811 | ||
1812 | closesocket(fd); | |
1813 | ||
1814 | if (!ret && rsp->result != SD_RES_SUCCESS && | |
1815 | rsp->result != SD_RES_VDI_NOT_LOCKED) { | |
1816 | error_report("%s, %s", sd_strerror(rsp->result), s->name); | |
1817 | } | |
1818 | ||
1819 | aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL); | |
1820 | closesocket(s->fd); | |
1821 | g_free(s->host_spec); | |
1822 | } | |
1823 | ||
1824 | static int64_t sd_getlength(BlockDriverState *bs) | |
1825 | { | |
1826 | BDRVSheepdogState *s = bs->opaque; | |
1827 | ||
1828 | return s->inode.vdi_size; | |
1829 | } | |
1830 | ||
1831 | static int sd_truncate(BlockDriverState *bs, int64_t offset) | |
1832 | { | |
1833 | Error *local_err = NULL; | |
1834 | BDRVSheepdogState *s = bs->opaque; | |
1835 | int ret, fd; | |
1836 | unsigned int datalen; | |
1837 | ||
1838 | if (offset < s->inode.vdi_size) { | |
1839 | error_report("shrinking is not supported"); | |
1840 | return -EINVAL; | |
1841 | } else if (offset > SD_MAX_VDI_SIZE) { | |
1842 | error_report("too big image size"); | |
1843 | return -EINVAL; | |
1844 | } | |
1845 | ||
1846 | fd = connect_to_sdog(s, &local_err); | |
1847 | if (fd < 0) { | |
1848 | error_report("%s", error_get_pretty(local_err));; | |
1849 | error_free(local_err); | |
1850 | return fd; | |
1851 | } | |
1852 | ||
1853 | /* we don't need to update entire object */ | |
1854 | datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id); | |
1855 | s->inode.vdi_size = offset; | |
1856 | ret = write_object(fd, s->aio_context, (char *)&s->inode, | |
1857 | vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies, | |
1858 | datalen, 0, false, s->cache_flags); | |
1859 | close(fd); | |
1860 | ||
1861 | if (ret < 0) { | |
1862 | error_report("failed to update an inode."); | |
1863 | } | |
1864 | ||
1865 | return ret; | |
1866 | } | |
1867 | ||
/*
 * This function is called after writing data objects. If we need to
 * update metadata, this sends a write request to the vdi object.
 * Otherwise, this switches back to sd_co_readv/writev.
 */
static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
{
    BDRVSheepdogState *s = acb->common.bs->opaque;
    struct iovec iov;
    AIOReq *aio_req;
    uint32_t offset, data_len, mn, mx;

    /* [mn, mx] is the range of data_vdi_id[] entries dirtied by the
     * writes; mn > mx (UINT32_MAX vs 0) means nothing is dirty. */
    mn = s->min_dirty_data_idx;
    mx = s->max_dirty_data_idx;
    if (mn <= mx) {
        /* we need to update the vdi object. */
        /* write back only the dirty slice of the data_vdi_id[] table */
        offset = sizeof(s->inode) - sizeof(s->inode.data_vdi_id) +
            mn * sizeof(s->inode.data_vdi_id[0]);
        data_len = (mx - mn + 1) * sizeof(s->inode.data_vdi_id[0]);

        /* reset the dirty window before the async write is issued */
        s->min_dirty_data_idx = UINT32_MAX;
        s->max_dirty_data_idx = 0;

        iov.iov_base = &s->inode;
        iov.iov_len = sizeof(s->inode);
        aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
                                data_len, offset, 0, false, 0, offset);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
        add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);

        /* the AIOCB completes only after the inode write finishes */
        acb->aio_done_func = sd_finish_aiocb;
        acb->aiocb_type = AIOCB_WRITE_UDATA;
        return;
    }

    sd_finish_aiocb(acb);
}
1905 | ||
1906 | /* Delete current working VDI on the snapshot chain */ | |
1907 | static bool sd_delete(BDRVSheepdogState *s) | |
1908 | { | |
1909 | Error *local_err = NULL; | |
1910 | unsigned int wlen = SD_MAX_VDI_LEN, rlen = 0; | |
1911 | SheepdogVdiReq hdr = { | |
1912 | .opcode = SD_OP_DEL_VDI, | |
1913 | .base_vdi_id = s->inode.vdi_id, | |
1914 | .data_length = wlen, | |
1915 | .flags = SD_FLAG_CMD_WRITE, | |
1916 | }; | |
1917 | SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; | |
1918 | int fd, ret; | |
1919 | ||
1920 | fd = connect_to_sdog(s, &local_err); | |
1921 | if (fd < 0) { | |
1922 | error_report("%s", error_get_pretty(local_err));; | |
1923 | error_free(local_err); | |
1924 | return false; | |
1925 | } | |
1926 | ||
1927 | ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, | |
1928 | s->name, &wlen, &rlen); | |
1929 | closesocket(fd); | |
1930 | if (ret) { | |
1931 | return false; | |
1932 | } | |
1933 | switch (rsp->result) { | |
1934 | case SD_RES_NO_VDI: | |
1935 | error_report("%s was already deleted", s->name); | |
1936 | /* fall through */ | |
1937 | case SD_RES_SUCCESS: | |
1938 | break; | |
1939 | default: | |
1940 | error_report("%s, %s", sd_strerror(rsp->result), s->name); | |
1941 | return false; | |
1942 | } | |
1943 | ||
1944 | return true; | |
1945 | } | |
1946 | ||
1947 | /* | |
1948 | * Create a writable VDI from a snapshot | |
1949 | */ | |
1950 | static int sd_create_branch(BDRVSheepdogState *s) | |
1951 | { | |
1952 | Error *local_err = NULL; | |
1953 | int ret, fd; | |
1954 | uint32_t vid; | |
1955 | char *buf; | |
1956 | bool deleted; | |
1957 | ||
1958 | DPRINTF("%" PRIx32 " is snapshot.\n", s->inode.vdi_id); | |
1959 | ||
1960 | buf = g_malloc(SD_INODE_SIZE); | |
1961 | ||
1962 | /* | |
1963 | * Even If deletion fails, we will just create extra snapshot based on | |
1964 | * the working VDI which was supposed to be deleted. So no need to | |
1965 | * false bail out. | |
1966 | */ | |
1967 | deleted = sd_delete(s); | |
1968 | ret = do_sd_create(s, &vid, !deleted, &local_err); | |
1969 | if (ret) { | |
1970 | error_report("%s", error_get_pretty(local_err));; | |
1971 | error_free(local_err); | |
1972 | goto out; | |
1973 | } | |
1974 | ||
1975 | DPRINTF("%" PRIx32 " is created.\n", vid); | |
1976 | ||
1977 | fd = connect_to_sdog(s, &local_err); | |
1978 | if (fd < 0) { | |
1979 | error_report("%s", error_get_pretty(local_err));; | |
1980 | error_free(local_err); | |
1981 | ret = fd; | |
1982 | goto out; | |
1983 | } | |
1984 | ||
1985 | ret = read_object(fd, s->aio_context, buf, vid_to_vdi_oid(vid), | |
1986 | s->inode.nr_copies, SD_INODE_SIZE, 0, s->cache_flags); | |
1987 | ||
1988 | closesocket(fd); | |
1989 | ||
1990 | if (ret < 0) { | |
1991 | goto out; | |
1992 | } | |
1993 | ||
1994 | memcpy(&s->inode, buf, sizeof(s->inode)); | |
1995 | ||
1996 | s->is_snapshot = false; | |
1997 | ret = 0; | |
1998 | DPRINTF("%" PRIx32 " was newly created.\n", s->inode.vdi_id); | |
1999 | ||
2000 | out: | |
2001 | g_free(buf); | |
2002 | ||
2003 | return ret; | |
2004 | } | |
2005 | ||
/*
 * Send I/O requests to the server.
 *
 * This function sends requests to the server, links the requests to
 * the inflight_list in BDRVSheepdogState, and exits without
 * waiting the response. The responses are received in the
 * `aio_read_response' function which is called from the main loop as
 * a fd handler.
 *
 * Returns 1 when we need to wait a response, 0 when there is no sent
 * request and -errno in error cases.
 */
static int coroutine_fn sd_co_rw_vector(void *p)
{
    SheepdogAIOCB *acb = p;
    int ret = 0;
    /* Byte count of the whole request and first data-object index. */
    unsigned long len, done = 0, total = acb->nb_sectors * BDRV_SECTOR_SIZE;
    unsigned long idx = acb->sector_num * BDRV_SECTOR_SIZE / SD_DATA_OBJ_SIZE;
    uint64_t oid;
    /* Byte offset inside the first data object; 0 for all later objects. */
    uint64_t offset = (acb->sector_num * BDRV_SECTOR_SIZE) % SD_DATA_OBJ_SIZE;
    BDRVSheepdogState *s = acb->common.bs->opaque;
    SheepdogInode *inode = &s->inode;
    AIOReq *aio_req;

    if (acb->aiocb_type == AIOCB_WRITE_UDATA && s->is_snapshot) {
        /*
         * In the case we open the snapshot VDI, Sheepdog creates the
         * writable VDI when we do a write operation first.
         */
        ret = sd_create_branch(s);
        if (ret) {
            acb->ret = -EIO;
            goto out;
        }
    }

    /*
     * Make sure we don't free the aiocb before we are done with all requests.
     * This additional reference is dropped at the end of this function.
     */
    acb->nr_pending++;

    /* Split the request into per-data-object sub-requests. */
    while (done != total) {
        uint8_t flags = 0;
        uint64_t old_oid = 0;
        bool create = false;

        oid = vid_to_data_oid(inode->data_vdi_id[idx], idx);

        /* Clamp to the remainder of the current data object. */
        len = MIN(total - done, SD_DATA_OBJ_SIZE - offset);

        switch (acb->aiocb_type) {
        case AIOCB_READ_UDATA:
            if (!inode->data_vdi_id[idx]) {
                /* Unallocated object: read back as zeroes, send nothing. */
                qemu_iovec_memset(acb->qiov, done, 0, len);
                goto done;
            }
            break;
        case AIOCB_WRITE_UDATA:
            if (!inode->data_vdi_id[idx]) {
                create = true;
            } else if (!is_data_obj_writable(inode, idx)) {
                /* Copy-On-Write */
                create = true;
                old_oid = oid;
                flags = SD_FLAG_CMD_COW;
            }
            break;
        case AIOCB_DISCARD_OBJ:
            /*
             * We discard the object only when the whole object is
             * 1) allocated 2) trimmed. Otherwise, simply skip it.
             */
            if (len != SD_DATA_OBJ_SIZE || inode->data_vdi_id[idx] == 0) {
                goto done;
            }
            break;
        default:
            break;
        }

        if (create) {
            DPRINTF("update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld\n",
                    inode->vdi_id, oid,
                    vid_to_data_oid(inode->data_vdi_id[idx], idx), idx);
            /* Newly created objects belong to the current (working) VDI. */
            oid = vid_to_data_oid(inode->vdi_id, idx);
            DPRINTF("new oid %" PRIx64 "\n", oid);
        }

        aio_req = alloc_aio_req(s, acb, oid, len, offset, flags, create,
                                old_oid, done);
        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);

        if (create) {
            /* If another in-flight request already creates this object,
             * this one was queued behind it; don't send it twice. */
            if (check_simultaneous_create(s, aio_req)) {
                goto done;
            }
        }

        add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
                        acb->aiocb_type);
    done:
        offset = 0;
        idx++;
        done += len;
    }
out:
    /* Drop the extra reference taken above; if nothing is still pending
     * (no request was sent, or all failed synchronously) return acb->ret. */
    if (!--acb->nr_pending) {
        return acb->ret;
    }
    return 1;
}
2118 | ||
2119 | static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num, | |
2120 | int nb_sectors, QEMUIOVector *qiov) | |
2121 | { | |
2122 | SheepdogAIOCB *acb; | |
2123 | int ret; | |
2124 | int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE; | |
2125 | BDRVSheepdogState *s = bs->opaque; | |
2126 | ||
2127 | if (bs->growable && offset > s->inode.vdi_size) { | |
2128 | ret = sd_truncate(bs, offset); | |
2129 | if (ret < 0) { | |
2130 | return ret; | |
2131 | } | |
2132 | } | |
2133 | ||
2134 | acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors); | |
2135 | acb->aio_done_func = sd_write_done; | |
2136 | acb->aiocb_type = AIOCB_WRITE_UDATA; | |
2137 | ||
2138 | ret = sd_co_rw_vector(acb); | |
2139 | if (ret <= 0) { | |
2140 | qemu_aio_release(acb); | |
2141 | return ret; | |
2142 | } | |
2143 | ||
2144 | qemu_coroutine_yield(); | |
2145 | ||
2146 | return acb->ret; | |
2147 | } | |
2148 | ||
2149 | static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num, | |
2150 | int nb_sectors, QEMUIOVector *qiov) | |
2151 | { | |
2152 | SheepdogAIOCB *acb; | |
2153 | int ret; | |
2154 | ||
2155 | acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors); | |
2156 | acb->aiocb_type = AIOCB_READ_UDATA; | |
2157 | acb->aio_done_func = sd_finish_aiocb; | |
2158 | ||
2159 | ret = sd_co_rw_vector(acb); | |
2160 | if (ret <= 0) { | |
2161 | qemu_aio_release(acb); | |
2162 | return ret; | |
2163 | } | |
2164 | ||
2165 | qemu_coroutine_yield(); | |
2166 | ||
2167 | return acb->ret; | |
2168 | } | |
2169 | ||
2170 | static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs) | |
2171 | { | |
2172 | BDRVSheepdogState *s = bs->opaque; | |
2173 | SheepdogAIOCB *acb; | |
2174 | AIOReq *aio_req; | |
2175 | ||
2176 | if (s->cache_flags != SD_FLAG_CMD_CACHE) { | |
2177 | return 0; | |
2178 | } | |
2179 | ||
2180 | acb = sd_aio_setup(bs, NULL, 0, 0); | |
2181 | acb->aiocb_type = AIOCB_FLUSH_CACHE; | |
2182 | acb->aio_done_func = sd_finish_aiocb; | |
2183 | ||
2184 | aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id), | |
2185 | 0, 0, 0, false, 0, 0); | |
2186 | QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings); | |
2187 | add_aio_request(s, aio_req, NULL, 0, acb->aiocb_type); | |
2188 | ||
2189 | qemu_coroutine_yield(); | |
2190 | return acb->ret; | |
2191 | } | |
2192 | ||
2193 | static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) | |
2194 | { | |
2195 | Error *local_err = NULL; | |
2196 | BDRVSheepdogState *s = bs->opaque; | |
2197 | int ret, fd; | |
2198 | uint32_t new_vid; | |
2199 | SheepdogInode *inode; | |
2200 | unsigned int datalen; | |
2201 | ||
2202 | DPRINTF("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " " | |
2203 | "is_snapshot %d\n", sn_info->name, sn_info->id_str, | |
2204 | s->name, sn_info->vm_state_size, s->is_snapshot); | |
2205 | ||
2206 | if (s->is_snapshot) { | |
2207 | error_report("You can't create a snapshot of a snapshot VDI, " | |
2208 | "%s (%" PRIu32 ").", s->name, s->inode.vdi_id); | |
2209 | ||
2210 | return -EINVAL; | |
2211 | } | |
2212 | ||
2213 | DPRINTF("%s %s\n", sn_info->name, sn_info->id_str); | |
2214 | ||
2215 | s->inode.vm_state_size = sn_info->vm_state_size; | |
2216 | s->inode.vm_clock_nsec = sn_info->vm_clock_nsec; | |
2217 | /* It appears that inode.tag does not require a NUL terminator, | |
2218 | * which means this use of strncpy is ok. | |
2219 | */ | |
2220 | strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag)); | |
2221 | /* we don't need to update entire object */ | |
2222 | datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id); | |
2223 | inode = g_malloc(datalen); | |
2224 | ||
2225 | /* refresh inode. */ | |
2226 | fd = connect_to_sdog(s, &local_err); | |
2227 | if (fd < 0) { | |
2228 | error_report("%s", error_get_pretty(local_err));; | |
2229 | error_free(local_err); | |
2230 | ret = fd; | |
2231 | goto cleanup; | |
2232 | } | |
2233 | ||
2234 | ret = write_object(fd, s->aio_context, (char *)&s->inode, | |
2235 | vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies, | |
2236 | datalen, 0, false, s->cache_flags); | |
2237 | if (ret < 0) { | |
2238 | error_report("failed to write snapshot's inode."); | |
2239 | goto cleanup; | |
2240 | } | |
2241 | ||
2242 | ret = do_sd_create(s, &new_vid, 1, &local_err); | |
2243 | if (ret < 0) { | |
2244 | error_report("%s", error_get_pretty(local_err));; | |
2245 | error_free(local_err); | |
2246 | error_report("failed to create inode for snapshot. %s", | |
2247 | strerror(errno)); | |
2248 | goto cleanup; | |
2249 | } | |
2250 | ||
2251 | ret = read_object(fd, s->aio_context, (char *)inode, | |
2252 | vid_to_vdi_oid(new_vid), s->inode.nr_copies, datalen, 0, | |
2253 | s->cache_flags); | |
2254 | ||
2255 | if (ret < 0) { | |
2256 | error_report("failed to read new inode info. %s", strerror(errno)); | |
2257 | goto cleanup; | |
2258 | } | |
2259 | ||
2260 | memcpy(&s->inode, inode, datalen); | |
2261 | DPRINTF("s->inode: name %s snap_id %x oid %x\n", | |
2262 | s->inode.name, s->inode.snap_id, s->inode.vdi_id); | |
2263 | ||
2264 | cleanup: | |
2265 | g_free(inode); | |
2266 | closesocket(fd); | |
2267 | return ret; | |
2268 | } | |
2269 | ||
2270 | /* | |
2271 | * We implement rollback(loadvm) operation to the specified snapshot by | |
2272 | * 1) switch to the snapshot | |
2273 | * 2) rely on sd_create_branch to delete working VDI and | |
2274 | * 3) create a new working VDI based on the specified snapshot | |
2275 | */ | |
2276 | static int sd_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) | |
2277 | { | |
2278 | BDRVSheepdogState *s = bs->opaque; | |
2279 | BDRVSheepdogState *old_s; | |
2280 | char tag[SD_MAX_VDI_TAG_LEN]; | |
2281 | uint32_t snapid = 0; | |
2282 | int ret = 0; | |
2283 | ||
2284 | old_s = g_new(BDRVSheepdogState, 1); | |
2285 | ||
2286 | memcpy(old_s, s, sizeof(BDRVSheepdogState)); | |
2287 | ||
2288 | snapid = strtoul(snapshot_id, NULL, 10); | |
2289 | if (snapid) { | |
2290 | tag[0] = 0; | |
2291 | } else { | |
2292 | pstrcpy(tag, sizeof(tag), snapshot_id); | |
2293 | } | |
2294 | ||
2295 | ret = reload_inode(s, snapid, tag); | |
2296 | if (ret) { | |
2297 | goto out; | |
2298 | } | |
2299 | ||
2300 | ret = sd_create_branch(s); | |
2301 | if (ret) { | |
2302 | goto out; | |
2303 | } | |
2304 | ||
2305 | g_free(old_s); | |
2306 | ||
2307 | return 0; | |
2308 | out: | |
2309 | /* recover bdrv_sd_state */ | |
2310 | memcpy(s, old_s, sizeof(BDRVSheepdogState)); | |
2311 | g_free(old_s); | |
2312 | ||
2313 | error_report("failed to open. recover old bdrv_sd_state."); | |
2314 | ||
2315 | return ret; | |
2316 | } | |
2317 | ||
/* Snapshot deletion callback.  Currently a stub: it accepts the request
 * and reports success without touching the server.
 */
static int sd_snapshot_delete(BlockDriverState *bs,
                              const char *snapshot_id,
                              const char *name,
                              Error **errp)
{
    /* FIXME: Delete specified snapshot id. */
    return 0;
}
2326 | ||
2327 | static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab) | |
2328 | { | |
2329 | Error *local_err = NULL; | |
2330 | BDRVSheepdogState *s = bs->opaque; | |
2331 | SheepdogReq req; | |
2332 | int fd, nr = 1024, ret, max = BITS_TO_LONGS(SD_NR_VDIS) * sizeof(long); | |
2333 | QEMUSnapshotInfo *sn_tab = NULL; | |
2334 | unsigned wlen, rlen; | |
2335 | int found = 0; | |
2336 | static SheepdogInode inode; | |
2337 | unsigned long *vdi_inuse; | |
2338 | unsigned int start_nr; | |
2339 | uint64_t hval; | |
2340 | uint32_t vid; | |
2341 | ||
2342 | vdi_inuse = g_malloc(max); | |
2343 | ||
2344 | fd = connect_to_sdog(s, &local_err); | |
2345 | if (fd < 0) { | |
2346 | error_report("%s", error_get_pretty(local_err));; | |
2347 | error_free(local_err); | |
2348 | ret = fd; | |
2349 | goto out; | |
2350 | } | |
2351 | ||
2352 | rlen = max; | |
2353 | wlen = 0; | |
2354 | ||
2355 | memset(&req, 0, sizeof(req)); | |
2356 | ||
2357 | req.opcode = SD_OP_READ_VDIS; | |
2358 | req.data_length = max; | |
2359 | ||
2360 | ret = do_req(fd, s->aio_context, (SheepdogReq *)&req, | |
2361 | vdi_inuse, &wlen, &rlen); | |
2362 | ||
2363 | closesocket(fd); | |
2364 | if (ret) { | |
2365 | goto out; | |
2366 | } | |
2367 | ||
2368 | sn_tab = g_new0(QEMUSnapshotInfo, nr); | |
2369 | ||
2370 | /* calculate a vdi id with hash function */ | |
2371 | hval = fnv_64a_buf(s->name, strlen(s->name), FNV1A_64_INIT); | |
2372 | start_nr = hval & (SD_NR_VDIS - 1); | |
2373 | ||
2374 | fd = connect_to_sdog(s, &local_err); | |
2375 | if (fd < 0) { | |
2376 | error_report("%s", error_get_pretty(local_err));; | |
2377 | error_free(local_err); | |
2378 | ret = fd; | |
2379 | goto out; | |
2380 | } | |
2381 | ||
2382 | for (vid = start_nr; found < nr; vid = (vid + 1) % SD_NR_VDIS) { | |
2383 | if (!test_bit(vid, vdi_inuse)) { | |
2384 | break; | |
2385 | } | |
2386 | ||
2387 | /* we don't need to read entire object */ | |
2388 | ret = read_object(fd, s->aio_context, (char *)&inode, | |
2389 | vid_to_vdi_oid(vid), | |
2390 | 0, SD_INODE_SIZE - sizeof(inode.data_vdi_id), 0, | |
2391 | s->cache_flags); | |
2392 | ||
2393 | if (ret) { | |
2394 | continue; | |
2395 | } | |
2396 | ||
2397 | if (!strcmp(inode.name, s->name) && is_snapshot(&inode)) { | |
2398 | sn_tab[found].date_sec = inode.snap_ctime >> 32; | |
2399 | sn_tab[found].date_nsec = inode.snap_ctime & 0xffffffff; | |
2400 | sn_tab[found].vm_state_size = inode.vm_state_size; | |
2401 | sn_tab[found].vm_clock_nsec = inode.vm_clock_nsec; | |
2402 | ||
2403 | snprintf(sn_tab[found].id_str, sizeof(sn_tab[found].id_str), | |
2404 | "%" PRIu32, inode.snap_id); | |
2405 | pstrcpy(sn_tab[found].name, | |
2406 | MIN(sizeof(sn_tab[found].name), sizeof(inode.tag)), | |
2407 | inode.tag); | |
2408 | found++; | |
2409 | } | |
2410 | } | |
2411 | ||
2412 | closesocket(fd); | |
2413 | out: | |
2414 | *psn_tab = sn_tab; | |
2415 | ||
2416 | g_free(vdi_inuse); | |
2417 | ||
2418 | if (ret < 0) { | |
2419 | return ret; | |
2420 | } | |
2421 | ||
2422 | return found; | |
2423 | } | |
2424 | ||
2425 | static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data, | |
2426 | int64_t pos, int size, int load) | |
2427 | { | |
2428 | Error *local_err = NULL; | |
2429 | bool create; | |
2430 | int fd, ret = 0, remaining = size; | |
2431 | unsigned int data_len; | |
2432 | uint64_t vmstate_oid; | |
2433 | uint64_t offset; | |
2434 | uint32_t vdi_index; | |
2435 | uint32_t vdi_id = load ? s->inode.parent_vdi_id : s->inode.vdi_id; | |
2436 | ||
2437 | fd = connect_to_sdog(s, &local_err); | |
2438 | if (fd < 0) { | |
2439 | error_report("%s", error_get_pretty(local_err));; | |
2440 | error_free(local_err); | |
2441 | return fd; | |
2442 | } | |
2443 | ||
2444 | while (remaining) { | |
2445 | vdi_index = pos / SD_DATA_OBJ_SIZE; | |
2446 | offset = pos % SD_DATA_OBJ_SIZE; | |
2447 | ||
2448 | data_len = MIN(remaining, SD_DATA_OBJ_SIZE - offset); | |
2449 | ||
2450 | vmstate_oid = vid_to_vmstate_oid(vdi_id, vdi_index); | |
2451 | ||
2452 | create = (offset == 0); | |
2453 | if (load) { | |
2454 | ret = read_object(fd, s->aio_context, (char *)data, vmstate_oid, | |
2455 | s->inode.nr_copies, data_len, offset, | |
2456 | s->cache_flags); | |
2457 | } else { | |
2458 | ret = write_object(fd, s->aio_context, (char *)data, vmstate_oid, | |
2459 | s->inode.nr_copies, data_len, offset, create, | |
2460 | s->cache_flags); | |
2461 | } | |
2462 | ||
2463 | if (ret < 0) { | |
2464 | error_report("failed to save vmstate %s", strerror(errno)); | |
2465 | goto cleanup; | |
2466 | } | |
2467 | ||
2468 | pos += data_len; | |
2469 | data += data_len; | |
2470 | remaining -= data_len; | |
2471 | } | |
2472 | ret = size; | |
2473 | cleanup: | |
2474 | closesocket(fd); | |
2475 | return ret; | |
2476 | } | |
2477 | ||
2478 | static int sd_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, | |
2479 | int64_t pos) | |
2480 | { | |
2481 | BDRVSheepdogState *s = bs->opaque; | |
2482 | void *buf; | |
2483 | int ret; | |
2484 | ||
2485 | buf = qemu_blockalign(bs, qiov->size); | |
2486 | qemu_iovec_to_buf(qiov, 0, buf, qiov->size); | |
2487 | ret = do_load_save_vmstate(s, (uint8_t *) buf, pos, qiov->size, 0); | |
2488 | qemu_vfree(buf); | |
2489 | ||
2490 | return ret; | |
2491 | } | |
2492 | ||
/* Load VM state: thin wrapper that runs do_load_save_vmstate() in load
 * (read) mode against the parent VDI's vmstate objects. */
static int sd_load_vmstate(BlockDriverState *bs, uint8_t *data,
                           int64_t pos, int size)
{
    BDRVSheepdogState *s = bs->opaque;

    return do_load_save_vmstate(s, data, pos, size, 1);
}
2500 | ||
2501 | ||
2502 | static coroutine_fn int sd_co_discard(BlockDriverState *bs, int64_t sector_num, | |
2503 | int nb_sectors) | |
2504 | { | |
2505 | SheepdogAIOCB *acb; | |
2506 | QEMUIOVector dummy; | |
2507 | BDRVSheepdogState *s = bs->opaque; | |
2508 | int ret; | |
2509 | ||
2510 | if (!s->discard_supported) { | |
2511 | return 0; | |
2512 | } | |
2513 | ||
2514 | acb = sd_aio_setup(bs, &dummy, sector_num, nb_sectors); | |
2515 | acb->aiocb_type = AIOCB_DISCARD_OBJ; | |
2516 | acb->aio_done_func = sd_finish_aiocb; | |
2517 | ||
2518 | ret = sd_co_rw_vector(acb); | |
2519 | if (ret <= 0) { | |
2520 | qemu_aio_release(acb); | |
2521 | return ret; | |
2522 | } | |
2523 | ||
2524 | qemu_coroutine_yield(); | |
2525 | ||
2526 | return acb->ret; | |
2527 | } | |
2528 | ||
/* Report the allocation status of the given sector range.
 *
 * Scans the inode's data_vdi_id table: an entry of 0 means the data
 * object is unallocated.  If the first object of the range is
 * allocated, returns DATA|OFFSET_VALID and *pnum covers the leading
 * run of allocated objects; otherwise returns 0 (unallocated) and
 * *pnum covers the leading run of unallocated objects.
 */
static coroutine_fn int64_t
sd_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                       int *pnum)
{
    BDRVSheepdogState *s = bs->opaque;
    SheepdogInode *inode = &s->inode;
    uint64_t offset = sector_num * BDRV_SECTOR_SIZE;
    /* Index of the first object touched, and one past the last. */
    unsigned long start = offset / SD_DATA_OBJ_SIZE,
                  end = DIV_ROUND_UP((sector_num + nb_sectors) *
                                     BDRV_SECTOR_SIZE, SD_DATA_OBJ_SIZE);
    unsigned long idx;
    int64_t ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;

    /* Find the first unallocated object in the range. */
    for (idx = start; idx < end; idx++) {
        if (inode->data_vdi_id[idx] == 0) {
            break;
        }
    }
    if (idx == start) {
        /* Get the longest length of unallocated sectors */
        ret = 0;
        for (idx = start + 1; idx < end; idx++) {
            if (inode->data_vdi_id[idx] != 0) {
                break;
            }
        }
    }

    /* NOTE(review): the run length is counted in whole objects from the
     * start object boundary, not from `offset`, so it can overshoot the
     * caller's range; the clamp below bounds it to nb_sectors. */
    *pnum = (idx - start) * SD_DATA_OBJ_SIZE / BDRV_SECTOR_SIZE;
    if (*pnum > nb_sectors) {
        *pnum = nb_sectors;
    }
    return ret;
}
2563 | ||
2564 | static int64_t sd_get_allocated_file_size(BlockDriverState *bs) | |
2565 | { | |
2566 | BDRVSheepdogState *s = bs->opaque; | |
2567 | SheepdogInode *inode = &s->inode; | |
2568 | unsigned long i, last = DIV_ROUND_UP(inode->vdi_size, SD_DATA_OBJ_SIZE); | |
2569 | uint64_t size = 0; | |
2570 | ||
2571 | for (i = 0; i < last; i++) { | |
2572 | if (inode->data_vdi_id[i] == 0) { | |
2573 | continue; | |
2574 | } | |
2575 | size += SD_DATA_OBJ_SIZE; | |
2576 | } | |
2577 | return size; | |
2578 | } | |
2579 | ||
/* Image-creation options advertised through .create_opts below. */
static QemuOptsList sd_create_opts = {
    .name = "sheepdog-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(sd_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        {
            .name = BLOCK_OPT_REDUNDANCY,
            .type = QEMU_OPT_STRING,
            .help = "Redundancy of the image"
        },
        { /* end of list */ }
    }
};
2607 | ||
/* Protocol driver for plain "sheepdog:" URIs (transport chosen by
 * sd_open).  The tcp/unix variants below differ only in protocol_name. */
static BlockDriver bdrv_sheepdog = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .bdrv_detach_aio_context = sd_detach_aio_context,
    .bdrv_attach_aio_context = sd_attach_aio_context,

    .create_opts    = &sd_create_opts,
};
2640 | ||
/* Protocol driver for explicit "sheepdog+tcp:" URIs; same callbacks as
 * bdrv_sheepdog, only the protocol_name differs. */
static BlockDriver bdrv_sheepdog_tcp = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog+tcp",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .bdrv_detach_aio_context = sd_detach_aio_context,
    .bdrv_attach_aio_context = sd_attach_aio_context,

    .create_opts    = &sd_create_opts,
};
2673 | ||
/* Protocol driver for "sheepdog+unix:" URIs (unix-domain socket
 * transport); same callbacks as bdrv_sheepdog. */
static BlockDriver bdrv_sheepdog_unix = {
    .format_name    = "sheepdog",
    .protocol_name  = "sheepdog+unix",
    .instance_size  = sizeof(BDRVSheepdogState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = sd_open,
    .bdrv_close     = sd_close,
    .bdrv_create    = sd_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_getlength = sd_getlength,
    .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
    .bdrv_truncate  = sd_truncate,

    .bdrv_co_readv  = sd_co_readv,
    .bdrv_co_writev = sd_co_writev,
    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
    .bdrv_co_discard = sd_co_discard,
    .bdrv_co_get_block_status = sd_co_get_block_status,

    .bdrv_snapshot_create   = sd_snapshot_create,
    .bdrv_snapshot_goto     = sd_snapshot_goto,
    .bdrv_snapshot_delete   = sd_snapshot_delete,
    .bdrv_snapshot_list     = sd_snapshot_list,

    .bdrv_save_vmstate  = sd_save_vmstate,
    .bdrv_load_vmstate  = sd_load_vmstate,

    .bdrv_detach_aio_context = sd_detach_aio_context,
    .bdrv_attach_aio_context = sd_attach_aio_context,

    .create_opts    = &sd_create_opts,
};
2706 | ||
/* Register the three sheepdog protocol drivers with the block layer at
 * startup (via the block_init constructor hook). */
static void bdrv_sheepdog_init(void)
{
    bdrv_register(&bdrv_sheepdog);
    bdrv_register(&bdrv_sheepdog_tcp);
    bdrv_register(&bdrv_sheepdog_unix);
}
block_init(bdrv_sheepdog_init);