/*
 * Source: qemu.git / migration.c
 * Commit: "migration: move buffered_file.c code into migration.c"
 */
1 /*
2 * QEMU live migration
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu-common.h"
17 #include "migration/migration.h"
18 #include "monitor/monitor.h"
19 #include "migration/qemu-file.h"
20 #include "sysemu/sysemu.h"
21 #include "block/block.h"
22 #include "qemu/sockets.h"
23 #include "migration/block.h"
24 #include "qemu/thread.h"
25 #include "qmp-commands.h"
26
27 //#define DEBUG_MIGRATION
28
29 #ifdef DEBUG_MIGRATION
30 #define DPRINTF(fmt, ...) \
31 do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
32 #else
33 #define DPRINTF(fmt, ...) \
34 do { } while (0)
35 #endif
36
/* Internal migration run states, stored in MigrationState.state. */
enum {
    MIG_STATE_ERROR,      /* migration failed */
    MIG_STATE_SETUP,      /* initial state: no migration has ever run */
    MIG_STATE_CANCELLED,  /* migration was cancelled by the user */
    MIG_STATE_ACTIVE,     /* migration is in progress */
    MIG_STATE_COMPLETED,  /* migration finished successfully */
};
44
45 #define MAX_THROTTLE (32 << 20) /* Migration speed throttling */
46
47 /* Migration XBZRLE default cache size */
48 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
49
50 static NotifierList migration_state_notifiers =
51 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
52
53 /* When we add fault tolerance, we could have several
54 migrations at once. For now we don't need to add
55 dynamic creation of migration */
56
57 MigrationState *migrate_get_current(void)
58 {
59 static MigrationState current_migration = {
60 .state = MIG_STATE_SETUP,
61 .bandwidth_limit = MAX_THROTTLE,
62 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
63 };
64
65 return &current_migration;
66 }
67
68 void qemu_start_incoming_migration(const char *uri, Error **errp)
69 {
70 const char *p;
71
72 if (strstart(uri, "tcp:", &p))
73 tcp_start_incoming_migration(p, errp);
74 #if !defined(WIN32)
75 else if (strstart(uri, "exec:", &p))
76 exec_start_incoming_migration(p, errp);
77 else if (strstart(uri, "unix:", &p))
78 unix_start_incoming_migration(p, errp);
79 else if (strstart(uri, "fd:", &p))
80 fd_start_incoming_migration(p, errp);
81 #endif
82 else {
83 error_setg(errp, "unknown migration protocol: %s\n", uri);
84 }
85 }
86
87 static void process_incoming_migration_co(void *opaque)
88 {
89 QEMUFile *f = opaque;
90 int ret;
91
92 ret = qemu_loadvm_state(f);
93 qemu_set_fd_handler(qemu_get_fd(f), NULL, NULL, NULL);
94 qemu_fclose(f);
95 if (ret < 0) {
96 fprintf(stderr, "load of migration failed\n");
97 exit(0);
98 }
99 qemu_announce_self();
100 DPRINTF("successfully loaded vm state\n");
101
102 bdrv_clear_incoming_migration_all();
103 /* Make sure all file formats flush their mutable metadata */
104 bdrv_invalidate_cache_all();
105
106 if (autostart) {
107 vm_start();
108 } else {
109 runstate_set(RUN_STATE_PAUSED);
110 }
111 }
112
113 static void enter_migration_coroutine(void *opaque)
114 {
115 Coroutine *co = opaque;
116 qemu_coroutine_enter(co, NULL);
117 }
118
119 void process_incoming_migration(QEMUFile *f)
120 {
121 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
122 int fd = qemu_get_fd(f);
123
124 assert(fd != -1);
125 socket_set_nonblock(fd);
126 qemu_set_fd_handler(fd, enter_migration_coroutine, NULL, co);
127 qemu_coroutine_enter(co, f);
128 }
129
/*
 * Maximum time the guest may be stopped during the final migration
 * phase, in nanoseconds.  Nanoseconds are used because that is the
 * finest resolution get_clock() offers; all user-visible interfaces
 * speak in seconds.
 */
static uint64_t max_downtime = 30000000;

/* Accessor for the currently configured maximum downtime (ns). */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
140
141 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
142 {
143 MigrationCapabilityStatusList *head = NULL;
144 MigrationCapabilityStatusList *caps;
145 MigrationState *s = migrate_get_current();
146 int i;
147
148 for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
149 if (head == NULL) {
150 head = g_malloc0(sizeof(*caps));
151 caps = head;
152 } else {
153 caps->next = g_malloc0(sizeof(*caps));
154 caps = caps->next;
155 }
156 caps->value =
157 g_malloc(sizeof(*caps->value));
158 caps->value->capability = i;
159 caps->value->state = s->enabled_capabilities[i];
160 }
161
162 return head;
163 }
164
165 static void get_xbzrle_cache_stats(MigrationInfo *info)
166 {
167 if (migrate_use_xbzrle()) {
168 info->has_xbzrle_cache = true;
169 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
170 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
171 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
172 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
173 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
174 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
175 }
176 }
177
178 MigrationInfo *qmp_query_migrate(Error **errp)
179 {
180 MigrationInfo *info = g_malloc0(sizeof(*info));
181 MigrationState *s = migrate_get_current();
182
183 switch (s->state) {
184 case MIG_STATE_SETUP:
185 /* no migration has happened ever */
186 break;
187 case MIG_STATE_ACTIVE:
188 info->has_status = true;
189 info->status = g_strdup("active");
190 info->has_total_time = true;
191 info->total_time = qemu_get_clock_ms(rt_clock)
192 - s->total_time;
193 info->has_expected_downtime = true;
194 info->expected_downtime = s->expected_downtime;
195
196 info->has_ram = true;
197 info->ram = g_malloc0(sizeof(*info->ram));
198 info->ram->transferred = ram_bytes_transferred();
199 info->ram->remaining = ram_bytes_remaining();
200 info->ram->total = ram_bytes_total();
201 info->ram->duplicate = dup_mig_pages_transferred();
202 info->ram->normal = norm_mig_pages_transferred();
203 info->ram->normal_bytes = norm_mig_bytes_transferred();
204 info->ram->dirty_pages_rate = s->dirty_pages_rate;
205
206
207 if (blk_mig_active()) {
208 info->has_disk = true;
209 info->disk = g_malloc0(sizeof(*info->disk));
210 info->disk->transferred = blk_mig_bytes_transferred();
211 info->disk->remaining = blk_mig_bytes_remaining();
212 info->disk->total = blk_mig_bytes_total();
213 }
214
215 get_xbzrle_cache_stats(info);
216 break;
217 case MIG_STATE_COMPLETED:
218 get_xbzrle_cache_stats(info);
219
220 info->has_status = true;
221 info->status = g_strdup("completed");
222 info->total_time = s->total_time;
223 info->has_downtime = true;
224 info->downtime = s->downtime;
225
226 info->has_ram = true;
227 info->ram = g_malloc0(sizeof(*info->ram));
228 info->ram->transferred = ram_bytes_transferred();
229 info->ram->remaining = 0;
230 info->ram->total = ram_bytes_total();
231 info->ram->duplicate = dup_mig_pages_transferred();
232 info->ram->normal = norm_mig_pages_transferred();
233 info->ram->normal_bytes = norm_mig_bytes_transferred();
234 break;
235 case MIG_STATE_ERROR:
236 info->has_status = true;
237 info->status = g_strdup("failed");
238 break;
239 case MIG_STATE_CANCELLED:
240 info->has_status = true;
241 info->status = g_strdup("cancelled");
242 break;
243 }
244
245 return info;
246 }
247
248 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
249 Error **errp)
250 {
251 MigrationState *s = migrate_get_current();
252 MigrationCapabilityStatusList *cap;
253
254 if (s->state == MIG_STATE_ACTIVE) {
255 error_set(errp, QERR_MIGRATION_ACTIVE);
256 return;
257 }
258
259 for (cap = params; cap; cap = cap->next) {
260 s->enabled_capabilities[cap->value->capability] = cap->value->state;
261 }
262 }
263
264 /* shared migration helpers */
265
266 static int migrate_fd_cleanup(MigrationState *s)
267 {
268 int ret = 0;
269
270 if (s->file) {
271 DPRINTF("closing file\n");
272 ret = qemu_fclose(s->file);
273 s->file = NULL;
274 }
275
276 assert(s->fd == -1);
277 return ret;
278 }
279
280 void migrate_fd_error(MigrationState *s)
281 {
282 DPRINTF("setting error state\n");
283 s->state = MIG_STATE_ERROR;
284 notifier_list_notify(&migration_state_notifiers, s);
285 migrate_fd_cleanup(s);
286 }
287
288 static void migrate_fd_completed(MigrationState *s)
289 {
290 DPRINTF("setting completed state\n");
291 if (migrate_fd_cleanup(s) < 0) {
292 s->state = MIG_STATE_ERROR;
293 } else {
294 s->state = MIG_STATE_COMPLETED;
295 runstate_set(RUN_STATE_POSTMIGRATE);
296 }
297 notifier_list_notify(&migration_state_notifiers, s);
298 }
299
300 ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
301 size_t size)
302 {
303 ssize_t ret;
304
305 if (s->state != MIG_STATE_ACTIVE) {
306 return -EIO;
307 }
308
309 do {
310 ret = s->write(s, data, size);
311 } while (ret == -1 && ((s->get_error(s)) == EINTR));
312
313 if (ret == -1)
314 ret = -(s->get_error(s));
315
316 return ret;
317 }
318
319 bool migrate_fd_put_ready(MigrationState *s, uint64_t max_size)
320 {
321 int ret;
322 uint64_t pending_size;
323 bool last_round = false;
324
325 qemu_mutex_lock_iothread();
326 if (s->state != MIG_STATE_ACTIVE) {
327 DPRINTF("put_ready returning because of non-active state\n");
328 qemu_mutex_unlock_iothread();
329 return false;
330 }
331 if (s->first_time) {
332 s->first_time = false;
333 DPRINTF("beginning savevm\n");
334 ret = qemu_savevm_state_begin(s->file, &s->params);
335 if (ret < 0) {
336 DPRINTF("failed, %d\n", ret);
337 migrate_fd_error(s);
338 qemu_mutex_unlock_iothread();
339 return false;
340 }
341 }
342
343 DPRINTF("iterate\n");
344 pending_size = qemu_savevm_state_pending(s->file, max_size);
345 DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
346 if (pending_size >= max_size) {
347 ret = qemu_savevm_state_iterate(s->file);
348 if (ret < 0) {
349 migrate_fd_error(s);
350 }
351 } else {
352 int old_vm_running = runstate_is_running();
353 int64_t start_time, end_time;
354
355 DPRINTF("done iterating\n");
356 start_time = qemu_get_clock_ms(rt_clock);
357 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
358 if (old_vm_running) {
359 vm_stop(RUN_STATE_FINISH_MIGRATE);
360 } else {
361 vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
362 }
363
364 if (qemu_savevm_state_complete(s->file) < 0) {
365 migrate_fd_error(s);
366 } else {
367 migrate_fd_completed(s);
368 }
369 end_time = qemu_get_clock_ms(rt_clock);
370 s->total_time = end_time - s->total_time;
371 s->downtime = end_time - start_time;
372 if (s->state != MIG_STATE_COMPLETED) {
373 if (old_vm_running) {
374 vm_start();
375 }
376 }
377 last_round = true;
378 }
379 qemu_mutex_unlock_iothread();
380
381 return last_round;
382 }
383
384 static void migrate_fd_cancel(MigrationState *s)
385 {
386 if (s->state != MIG_STATE_ACTIVE)
387 return;
388
389 DPRINTF("cancelling migration\n");
390
391 s->state = MIG_STATE_CANCELLED;
392 notifier_list_notify(&migration_state_notifiers, s);
393 qemu_savevm_state_cancel(s->file);
394
395 migrate_fd_cleanup(s);
396 }
397
398 int migrate_fd_close(MigrationState *s)
399 {
400 int rc = 0;
401 if (s->fd != -1) {
402 rc = s->close(s);
403 s->fd = -1;
404 }
405 return rc;
406 }
407
408 void add_migration_state_change_notifier(Notifier *notify)
409 {
410 notifier_list_add(&migration_state_notifiers, notify);
411 }
412
413 void remove_migration_state_change_notifier(Notifier *notify)
414 {
415 notifier_remove(notify);
416 }
417
418 bool migration_is_active(MigrationState *s)
419 {
420 return s->state == MIG_STATE_ACTIVE;
421 }
422
423 bool migration_has_finished(MigrationState *s)
424 {
425 return s->state == MIG_STATE_COMPLETED;
426 }
427
428 bool migration_has_failed(MigrationState *s)
429 {
430 return (s->state == MIG_STATE_CANCELLED ||
431 s->state == MIG_STATE_ERROR);
432 }
433
434 void migrate_fd_connect(MigrationState *s)
435 {
436 s->state = MIG_STATE_ACTIVE;
437 s->first_time = true;
438 qemu_fopen_ops_buffered(s);
439 }
440
441 static MigrationState *migrate_init(const MigrationParams *params)
442 {
443 MigrationState *s = migrate_get_current();
444 int64_t bandwidth_limit = s->bandwidth_limit;
445 bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
446 int64_t xbzrle_cache_size = s->xbzrle_cache_size;
447
448 memcpy(enabled_capabilities, s->enabled_capabilities,
449 sizeof(enabled_capabilities));
450
451 memset(s, 0, sizeof(*s));
452 s->bandwidth_limit = bandwidth_limit;
453 s->params = *params;
454 memcpy(s->enabled_capabilities, enabled_capabilities,
455 sizeof(enabled_capabilities));
456 s->xbzrle_cache_size = xbzrle_cache_size;
457
458 s->bandwidth_limit = bandwidth_limit;
459 s->state = MIG_STATE_SETUP;
460 s->total_time = qemu_get_clock_ms(rt_clock);
461
462 return s;
463 }
464
/* Errors registered by subsystems that currently forbid migration. */
static GSList *migration_blockers;

/* Register @reason as blocking migration until migrate_del_blocker(). */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

/* Remove a blocker previously registered with migrate_add_blocker(). */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
476
477 void qmp_migrate(const char *uri, bool has_blk, bool blk,
478 bool has_inc, bool inc, bool has_detach, bool detach,
479 Error **errp)
480 {
481 Error *local_err = NULL;
482 MigrationState *s = migrate_get_current();
483 MigrationParams params;
484 const char *p;
485
486 params.blk = blk;
487 params.shared = inc;
488
489 if (s->state == MIG_STATE_ACTIVE) {
490 error_set(errp, QERR_MIGRATION_ACTIVE);
491 return;
492 }
493
494 if (qemu_savevm_state_blocked(errp)) {
495 return;
496 }
497
498 if (migration_blockers) {
499 *errp = error_copy(migration_blockers->data);
500 return;
501 }
502
503 s = migrate_init(&params);
504
505 if (strstart(uri, "tcp:", &p)) {
506 tcp_start_outgoing_migration(s, p, &local_err);
507 #if !defined(WIN32)
508 } else if (strstart(uri, "exec:", &p)) {
509 exec_start_outgoing_migration(s, p, &local_err);
510 } else if (strstart(uri, "unix:", &p)) {
511 unix_start_outgoing_migration(s, p, &local_err);
512 } else if (strstart(uri, "fd:", &p)) {
513 fd_start_outgoing_migration(s, p, &local_err);
514 #endif
515 } else {
516 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol");
517 return;
518 }
519
520 if (local_err) {
521 migrate_fd_error(s);
522 error_propagate(errp, local_err);
523 return;
524 }
525
526 notifier_list_notify(&migration_state_notifiers, s);
527 }
528
529 void qmp_migrate_cancel(Error **errp)
530 {
531 migrate_fd_cancel(migrate_get_current());
532 }
533
534 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
535 {
536 MigrationState *s = migrate_get_current();
537
538 /* Check for truncation */
539 if (value != (size_t)value) {
540 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
541 "exceeding address space");
542 return;
543 }
544
545 s->xbzrle_cache_size = xbzrle_cache_resize(value);
546 }
547
548 int64_t qmp_query_migrate_cache_size(Error **errp)
549 {
550 return migrate_xbzrle_cache_size();
551 }
552
553 void qmp_migrate_set_speed(int64_t value, Error **errp)
554 {
555 MigrationState *s;
556
557 if (value < 0) {
558 value = 0;
559 }
560
561 s = migrate_get_current();
562 s->bandwidth_limit = value;
563 qemu_file_set_rate_limit(s->file, s->bandwidth_limit);
564 }
565
/*
 * QMP: set the maximum tolerated downtime, given in seconds, stored
 * internally in nanoseconds.
 *
 * NOTE(review): UINT64_MAX is not exactly representable as a double, so
 * the MIN() clamp compares against 2^64 and the subsequent cast is
 * implementation-sensitive for values at the very top of the range —
 * harmless for realistic downtimes, but worth confirming.
 */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}
572
573 int migrate_use_xbzrle(void)
574 {
575 MigrationState *s;
576
577 s = migrate_get_current();
578
579 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
580 }
581
582 int64_t migrate_xbzrle_cache_size(void)
583 {
584 MigrationState *s;
585
586 s = migrate_get_current();
587
588 return s->xbzrle_cache_size;
589 }
590
591 /* migration thread support */
592
/* Per-migration buffering state shared between the QEMUFile callbacks
 * and the migration worker thread. */
typedef struct QEMUFileBuffered {
    MigrationState *migration_state; /* owning migration */
    QEMUFile *file;                  /* QEMUFile wrapping this buffer */
    size_t bytes_xfer;               /* bytes sent in the current period */
    size_t xfer_limit;               /* byte budget per 100ms period */
    uint8_t *buffer;                 /* pending outgoing data */
    size_t buffer_size;              /* bytes currently queued in buffer */
    size_t buffer_capacity;          /* allocated size of buffer */
    QemuThread thread;               /* detached worker thread */
} QEMUFileBuffered;
603
604 static ssize_t buffered_flush(QEMUFileBuffered *s)
605 {
606 size_t offset = 0;
607 ssize_t ret = 0;
608
609 DPRINTF("flushing %zu byte(s) of data\n", s->buffer_size);
610
611 while (s->bytes_xfer < s->xfer_limit && offset < s->buffer_size) {
612 size_t to_send = MIN(s->buffer_size - offset, s->xfer_limit - s->bytes_xfer);
613 ret = migrate_fd_put_buffer(s->migration_state, s->buffer + offset,
614 to_send);
615 if (ret <= 0) {
616 DPRINTF("error flushing data, %zd\n", ret);
617 break;
618 } else {
619 DPRINTF("flushed %zd byte(s)\n", ret);
620 offset += ret;
621 s->bytes_xfer += ret;
622 }
623 }
624
625 DPRINTF("flushed %zu of %zu byte(s)\n", offset, s->buffer_size);
626 memmove(s->buffer, s->buffer + offset, s->buffer_size - offset);
627 s->buffer_size -= offset;
628
629 if (ret < 0) {
630 return ret;
631 }
632 return offset;
633 }
634
635 static int buffered_put_buffer(void *opaque, const uint8_t *buf,
636 int64_t pos, int size)
637 {
638 QEMUFileBuffered *s = opaque;
639 ssize_t error;
640
641 DPRINTF("putting %d bytes at %" PRId64 "\n", size, pos);
642
643 error = qemu_file_get_error(s->file);
644 if (error) {
645 DPRINTF("flush when error, bailing: %s\n", strerror(-error));
646 return error;
647 }
648
649 if (size <= 0) {
650 return size;
651 }
652
653 if (size > (s->buffer_capacity - s->buffer_size)) {
654 DPRINTF("increasing buffer capacity from %zu by %zu\n",
655 s->buffer_capacity, size + 1024);
656
657 s->buffer_capacity += size + 1024;
658
659 s->buffer = g_realloc(s->buffer, s->buffer_capacity);
660 }
661
662 memcpy(s->buffer + s->buffer_size, buf, size);
663 s->buffer_size += size;
664
665 return size;
666 }
667
668 static int buffered_close(void *opaque)
669 {
670 QEMUFileBuffered *s = opaque;
671 ssize_t ret = 0;
672 int ret2;
673
674 DPRINTF("closing\n");
675
676 s->xfer_limit = INT_MAX;
677 while (!qemu_file_get_error(s->file) && s->buffer_size) {
678 ret = buffered_flush(s);
679 if (ret < 0) {
680 break;
681 }
682 }
683
684 ret2 = migrate_fd_close(s->migration_state);
685 if (ret >= 0) {
686 ret = ret2;
687 }
688 ret = migrate_fd_close(s->migration_state);
689 s->migration_state->complete = true;
690 return ret;
691 }
692
693 static int buffered_get_fd(void *opaque)
694 {
695 QEMUFileBuffered *s = opaque;
696
697 return qemu_get_fd(s->file);
698 }
699
700 /*
701 * The meaning of the return values is:
702 * 0: We can continue sending
703 * 1: Time to stop
704 * negative: There has been an error
705 */
706 static int buffered_rate_limit(void *opaque)
707 {
708 QEMUFileBuffered *s = opaque;
709 int ret;
710
711 ret = qemu_file_get_error(s->file);
712 if (ret) {
713 return ret;
714 }
715
716 if (s->bytes_xfer > s->xfer_limit) {
717 return 1;
718 }
719
720 return 0;
721 }
722
723 static int64_t buffered_set_rate_limit(void *opaque, int64_t new_rate)
724 {
725 QEMUFileBuffered *s = opaque;
726 if (qemu_file_get_error(s->file)) {
727 goto out;
728 }
729 if (new_rate > SIZE_MAX) {
730 new_rate = SIZE_MAX;
731 }
732
733 s->xfer_limit = new_rate / 10;
734
735 out:
736 return s->xfer_limit;
737 }
738
739 static int64_t buffered_get_rate_limit(void *opaque)
740 {
741 QEMUFileBuffered *s = opaque;
742
743 return s->xfer_limit;
744 }
745
746 /* 100ms xfer_limit is the limit that we should write each 100ms */
747 #define BUFFER_DELAY 100
748
/*
 * Migration worker thread.
 *
 * Loops until migration_state->complete is set (by buffered_close()):
 * every BUFFER_DELAY (100ms) it recomputes the observed bandwidth and
 * from it the max_size threshold handed to migrate_fd_put_ready();
 * it sleeps when the period's byte budget is used up, flushes buffered
 * data, and drives the next savevm round.  Frees the buffer and the
 * QEMUFileBuffered itself on exit (thread is detached).
 */
static void *buffered_file_thread(void *opaque)
{
    QEMUFileBuffered *s = opaque;
    int64_t initial_time = qemu_get_clock_ms(rt_clock);
    int64_t max_size = 0;
    bool last_round = false;

    while (true) {
        int64_t current_time = qemu_get_clock_ms(rt_clock);

        if (s->migration_state->complete) {
            break;
        }
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = s->bytes_xfer;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            /* max downtime is in ns, bandwidth in bytes/ms. */
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
                    " bandwidth %g max_size %" PRId64 "\n",
                    transferred_bytes, time_spent, bandwidth, max_size);

            /* Start a fresh accounting period. */
            s->bytes_xfer = 0;
            initial_time = current_time;
        }
        if (!last_round && (s->bytes_xfer >= s->xfer_limit)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
        if (buffered_flush(s) < 0) {
            break;
        }

        DPRINTF("file is ready\n");
        if (s->bytes_xfer < s->xfer_limit) {
            DPRINTF("notifying client\n");
            last_round = migrate_fd_put_ready(s->migration_state, max_size);
        }
    }

    g_free(s->buffer);
    g_free(s);
    return NULL;
}
794
/* QEMUFile operations backed by the in-memory migration buffer. */
static const QEMUFileOps buffered_file_ops = {
    .get_fd = buffered_get_fd,
    .put_buffer = buffered_put_buffer,
    .close = buffered_close,
    .rate_limit = buffered_rate_limit,
    .get_rate_limit = buffered_get_rate_limit,
    .set_rate_limit = buffered_set_rate_limit,
};
803
804 void qemu_fopen_ops_buffered(MigrationState *migration_state)
805 {
806 QEMUFileBuffered *s;
807
808 s = g_malloc0(sizeof(*s));
809
810 s->migration_state = migration_state;
811 s->xfer_limit = migration_state->bandwidth_limit / 10;
812 s->migration_state->complete = false;
813
814 s->file = qemu_fopen_ops(s, &buffered_file_ops);
815
816 migration_state->file = s->file;
817
818 qemu_thread_create(&s->thread, buffered_file_thread, s,
819 QEMU_THREAD_DETACHED);
820 }