#define RAM_SAVE_FLAG_EOS 0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE 0x40
+/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
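+/* (migration.h presumably defines RAM_SAVE_FLAG_HOOK as 0x80; it is consumed by the load hook below) */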
ram_bulk_stage = false;
}
} else {
+ int ret;
uint8_t *p;
int cont = (block == last_sent_block) ?
RAM_SAVE_FLAG_CONTINUE : 0;
/* When in doubt, send the page as normal */
bytes_sent = -1;
- if (is_zero_page(p)) {
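+    /*
+     * Give a registered transport hook (RDMA, for instance) a chance
+     * to transmit the page itself.  RAM_SAVE_CONTROL_NOT_SUPP means
+     * no hook took the page, so fall through to the usual zero-page/
+     * XBZRLE paths below.  RAM_SAVE_CONTROL_DELAYED means the
+     * transport sends the page later and accounts for it then, so
+     * the accounting here is skipped.
+     */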
+ ret = ram_control_save_page(f, block->offset,
+ offset, TARGET_PAGE_SIZE, &bytes_sent);
+
+ if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
+ if (ret != RAM_SAVE_CONTROL_DELAYED) {
+ if (bytes_sent > 0) {
+ acct_info.norm_pages++;
+ } else if (bytes_sent == 0) {
+ acct_info.dup_pages++;
+ }
+ }
+ } else if (is_zero_page(p)) {
acct_info.dup_pages++;
bytes_sent = save_block_hdr(f, block, offset, cont,
RAM_SAVE_FLAG_COMPRESS);
}
qemu_mutex_unlock_ramlist();
+
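+    /*
+     * Hook the setup stage too; an RDMA transport presumably uses
+     * this to register and pin RAM blocks before the bulk round.
+     * Both calls are no-ops when no hook is registered.
+     */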
+ ram_control_before_iterate(f, RAM_CONTROL_SETUP);
+ ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
reset_ram_globals();
}
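+    /* Notify any registered hook that a new iteration round is starting */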
+ ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+
t0 = qemu_get_clock_ns(rt_clock);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
qemu_mutex_unlock_ramlist();
+ /*
+  * Must occur before EOS (or any other QEMUFile operation)
+  * because of the RDMA protocol.
+  */
+ ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+
if (ret < 0) {
bytes_transferred += total_sent;
return ret;
qemu_mutex_lock_ramlist();
migration_bitmap_sync();
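+    /* Last chance for a transport hook to run before the final flush */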
+ ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+
/* try transferring iterative blocks of memory */
/* flush all remaining blocks regardless of rate limiting */
}
bytes_transferred += bytes_sent;
}
+
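+    /*
+     * Let the hook finish anything still in flight (RDMA work
+     * completions, for example) before migration_end().
+     */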
+ ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
qemu_mutex_unlock_ramlist();
ret = -EINVAL;
goto done;
}
+ } else if (flags & RAM_SAVE_FLAG_HOOK) {
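+        /*
+         * Incoming side: hand transport-specific data sent under
+         * RAM_SAVE_FLAG_HOOK (RDMA block registration, for example)
+         * to the registered load hook.
+         */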
+ ram_control_load_hook(f, flags);
}
error = qemu_file_get_error(f);
if (error) {