* (Set during postcopy)
*/
#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
#endif
#ifdef TARGET_PAGE_BITS_VARY
rb->flags |= RAM_UF_ZEROPAGE;
}
+bool qemu_ram_is_migratable(RAMBlock *rb)
+{
+ return rb->flags & RAM_MIGRATABLE;
+}
+
+void qemu_ram_set_migratable(RAMBlock *rb)
+{
+ rb->flags |= RAM_MIGRATABLE;
+}
+
+void qemu_ram_unset_migratable(RAMBlock *rb)
+{
+ rb->flags &= ~RAM_MIGRATABLE;
+}
+
/* Called with iothread lock held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
return ret;
}
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
+{
+ RAMBlock *block;
+ int ret = 0;
+
+ rcu_read_lock();
+ RAMBLOCK_FOREACH(block) {
+ if (!qemu_ram_is_migratable(block)) {
+ continue;
+ }
+ ret = func(block->idstr, block->host, block->offset,
+ block->used_length, opaque);
+ if (ret) {
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
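Since the new iterator has the same contract as qemu_ram_foreach_block() and merely skips non-migratable blocks, callers look identical. A minimal sketch, assuming the RAMBlockIterFunc signature visible at the call site above (the callback name and its byte-counting purpose are hypothetical):

    /* Hypothetical callback: sum the used length of all migratable blocks. */
    static int sum_bytes_cb(const char *block_name, void *host_addr,
                            ram_addr_t offset, ram_addr_t length, void *opaque)
    {
        *(uint64_t *)opaque += length;
        return 0; /* any non-zero return stops the iteration */
    }

    uint64_t total = 0;
    qemu_ram_foreach_migratable_block(sum_bytes_cb, &total);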
/*
* Unmap pages of memory from start to start+length such that
* they a) read as 0, b) Trigger whatever fault mechanism
}
type_init(machvirt_machine_init);
+#define VIRT_COMPAT_2_12 \
+ HW_COMPAT_2_12
+
static void virt_2_12_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
static void virt_machine_2_12_options(MachineClass *mc)
{
+ SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_12);
}
DEFINE_VIRT_MACHINE_AS_LATEST(2, 12)
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
}
DEFINE_I440FX_MACHINE(v3_0, "pc-i440fx-3.0", NULL,
{
pc_q35_machine_options(m);
m->alias = "q35";
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
}
DEFINE_Q35_MACHINE(v3_0, "pc-q35-3.0", NULL,
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
+bool qemu_ram_is_migratable(RAMBlock *rb);
+void qemu_ram_set_migratable(RAMBlock *rb);
+void qemu_ram_unset_migratable(RAMBlock *rb);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
ram_addr_t offset, ram_addr_t length, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
#endif
#ifndef HW_COMPAT_H
#define HW_COMPAT_H
-#define HW_COMPAT_2_12
+#define HW_COMPAT_2_12 \
+ {\
+ .driver = "migration",\
+ .property = "decompress-error-check",\
+ .value = "off",\
+ },
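SET_MACHINE_COMPAT() pastes this list into a machine type's compat_props, so every 2.12-or-older machine keeps the pre-3.0 behaviour. A sketch of the single GlobalProperty entry the macro now contributes (assuming the usual compat machinery):

    /* Sketch: what HW_COMPAT_2_12 adds to pre-3.0 machine types. */
    GlobalProperty prop = {
        .driver   = "migration",
        .property = "decompress-error-check",
        .value    = "off", /* keep ignoring spurious decompression errors */
    };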
#define HW_COMPAT_2_11 \
{\
}
}
+static bool migrate_late_block_activate(void)
+{
+ MigrationState *s;
+
+ s = migrate_get_current();
+
+ return s->enabled_capabilities[
+ MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
+}
+
/*
* Called on -incoming with a defer: uri.
* The migration can be started later after any parameters have been
Error *local_err = NULL;
MigrationIncomingState *mis = opaque;
- /* Make sure all file formats flush their mutable metadata.
- * If we get an error here, just don't restart the VM yet. */
- bdrv_invalidate_cache_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- local_err = NULL;
- autostart = false;
+ /* If the late_block_activate capability is set:
+ * only fire up the block code now if we're going to restart the
+ * VM, else 'cont' will do it.
+ * Activating the block layer takes file locks, and we don't want
+ * that to happen unless we really are starting the VM.
+ */
+ if (!migrate_late_block_activate() ||
+ (autostart && (!global_state_received() ||
+ global_state_get_runstate() == RUN_STATE_RUNNING))) {
+ /* Make sure all file formats flush their mutable metadata.
+ * If we get an error here, just don't restart the VM yet. */
+ bdrv_invalidate_cache_all(&local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ local_err = NULL;
+ autostart = false;
+ }
}
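Spelling out the guard above, block activation still happens at this point in exactly two cases (a restatement of the condition, not new behaviour):

    /*
     * Activate block devices here iff:
     *  - the late-block-activate capability is off (legacy behaviour), or
     *  - we will autostart the VM: autostart is set and the source either
     *    sent no global state or reported RUN_STATE_RUNNING.
     * Otherwise activation is left to the later 'cont' command.
     */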
/*
ms->send_configuration ? "on" : "off");
monitor_printf(mon, "send-section-footer: %s\n",
ms->send_section_footer ? "on" : "off");
+ monitor_printf(mon, "decompress-error-check: %s\n",
+ ms->decompress_error_check ? "on" : "off");
}
#define DEFINE_PROP_MIG_CAP(name, x) \
send_configuration, true),
DEFINE_PROP_BOOL("send-section-footer", MigrationState,
send_section_footer, true),
+ DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
+ decompress_error_check, true),
/* Migration parameters */
DEFINE_PROP_UINT8("x-compress-level", MigrationState,
/* Needed by postcopy-pause state */
QemuSemaphore postcopy_pause_sem;
QemuSemaphore postcopy_pause_rp_sem;
+ /*
+ * Whether we abort the migration if decompression errors are
+ * detected at the destination. It is left at false for qemu
+ * older than 3.0, since only newer qemu sends streams that
+ * do not trigger spurious decompression errors.
+ */
+ bool decompress_error_check;
};
void migrate_set_state(int *state, int old_state, int new_state);
}
/* We don't support postcopy with shared RAM yet */
- if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+ if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
goto out;
}
*/
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
- if (qemu_ram_foreach_block(init_range, NULL)) {
+ if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
return -1;
}
return -1;
}
- if (qemu_ram_foreach_block(cleanup_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
return -1;
}
/* Let the fault thread quit */
*/
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(nhp_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
return -1;
}
/*
 * Mark the given area of RAM as requiring notification of accesses to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on qemu_ram_foreach_migratable_block.
* host_addr: Base of area to mark
* offset: Offset in the whole ram arena
* length: Length of the section
mis->have_fault_thread = true;
/* Mark so that we get notified of accesses to unwritten areas */
- if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+ if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
return -1;
}
return ret;
}
+/* Should be holding either ram_list.mutex, or the RCU lock. */
+#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
+ RAMBLOCK_FOREACH(block) \
+ if (!qemu_ram_is_migratable(block)) {} else
+
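The {} else shape is what makes the macro safe to follow with a braced body: the filter consumes the if, and the caller's block becomes the else branch. A sketch of how a typical use expands (illustrative only):

    /* RAMBLOCK_FOREACH_MIGRATABLE(rb) { body; } expands roughly to: */
    RAMBLOCK_FOREACH(rb)
        if (!qemu_ram_is_migratable(rb)) {
            /* block skipped: not migratable */
        } else {
            body;
        }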
static void ramblock_recv_map_init(void)
{
RAMBlock *rb;
- RAMBLOCK_FOREACH(rb) {
+ RAMBLOCK_FOREACH_MIGRATABLE(rb) {
assert(!rb->receivedmap);
rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
}
unsigned long *bitmap = rb->bmap;
unsigned long next;
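+ /* Non-migratable blocks are skipped: returning 'size' is this
+ * function's "no dirty page found" result. */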
+ if (!qemu_ram_is_migratable(rb)) {
+ return size;
+ }
+
if (rs->ram_bulk_stage && start > 0) {
next = start + 1;
} else {
RAMBlock *block;
uint64_t summary = 0;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
summary |= block->page_size;
}
qemu_mutex_lock(&rs->bitmap_mutex);
rcu_read_lock();
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
migration_bitmap_sync_range(rs, block, 0, block->used_length);
}
rcu_read_unlock();
size_t pagesize_bits =
qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
+ if (!qemu_ram_is_migratable(pss->block)) {
+ error_report("block %s should not be migrated !", pss->block->idstr);
+ return 0;
+ }
+
do {
/* Check the pages is dirty and if it is send it */
if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
uint64_t total = 0;
rcu_read_lock();
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
total += block->used_length;
}
rcu_read_unlock();
*/
memory_global_dirty_log_stop();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
g_free(block->bmap);
block->bmap = NULL;
g_free(block->unsentmap);
{
struct RAMBlock *block;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
unsigned long *bitmap = block->bmap;
unsigned long range = block->used_length >> TARGET_PAGE_BITS;
unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
struct RAMBlock *block;
int ret;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
PostcopyDiscardState *pds =
postcopy_discard_send_init(ms, block->idstr);
rs->last_sent_block = NULL;
rs->last_page = 0;
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
unsigned long *bitmap = block->bmap;
unsigned long *unsentmap = block->unsentmap;
/* Skip setting bitmap if there is no RAM */
if (ram_bytes_total()) {
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
pages = block->max_length >> TARGET_PAGE_BITS;
block->bmap = bitmap_new(pages);
bitmap_set(block->bmap, 0, pages);
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
qemu_put_be64(f, block->used_length);
return NULL;
}
+ if (!qemu_ram_is_migratable(block)) {
+ error_report("block %s should not be migrated !", id);
+ return NULL;
+ }
+
return block;
}
ret = qemu_uncompress_data(¶m->stream, des, pagesize,
param->compbuf, len);
- if (ret < 0) {
+ if (ret < 0 && migrate_get_current()->decompress_error_check) {
error_report("decompress data failed");
qemu_file_set_error(decomp_file, ret);
}
xbzrle_load_cleanup();
compress_threads_load_cleanup();
- RAMBLOCK_FOREACH(rb) {
+ RAMBLOCK_FOREACH_MIGRATABLE(rb) {
g_free(rb->receivedmap);
rb->receivedmap = NULL;
}
length = qemu_get_be64(f);
block = qemu_ram_block_by_name(id);
- if (block) {
+ if (block && !qemu_ram_is_migratable(block)) {
+ error_report("block %s should not be migrated !", id);
+ ret = -EINVAL;
+ } else if (block) {
if (length != block->used_length) {
Error *local_err = NULL;
QIOChannel parent;
RDMAContext *rdma;
QEMUFile *file;
- size_t len;
bool blocking; /* XXX we don't actually honour this yet */
};
static void qemu_rdma_cleanup(RDMAContext *rdma)
{
- struct rdma_cm_event *cm_event;
- int ret, idx;
+ int idx;
if (rdma->cm_id && rdma->connected) {
if ((rdma->error_state ||
qemu_rdma_post_send_control(rdma, NULL, &head);
}
- ret = rdma_disconnect(rdma->cm_id);
- if (!ret) {
- trace_qemu_rdma_cleanup_waiting_for_disconnect();
- ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (!ret) {
- rdma_ack_cm_event(cm_event);
- }
- }
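+ /* Don't wait for the RDMA_CM_EVENT_DISCONNECTED event after
+ * rdma_disconnect(): it may never be delivered, and blocking in
+ * rdma_get_cm_event() here can hang the cleanup path. */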
+ rdma_disconnect(rdma->cm_id);
trace_qemu_rdma_cleanup_disconnect();
rdma->connected = false;
}
int ret;
ssize_t done = 0;
size_t i;
+ size_t len = 0;
CHECK_ERROR_STATE();
while (remaining) {
RDMAControlHeader head;
- rioc->len = MIN(remaining, RDMA_SEND_INCREMENT);
- remaining -= rioc->len;
+ len = MIN(remaining, RDMA_SEND_INCREMENT);
+ remaining -= len;
- head.len = rioc->len;
+ head.len = len;
head.type = RDMA_CONTROL_QEMU_FILE;
ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
return ret;
}
- data += rioc->len;
- done += rioc->len;
+ data += len;
+ done += len;
}
}
}
}
}
- rioc->len = done;
- return rioc->len;
+ return done;
}
/*
{
qemu_ram_set_idstr(mr->ram_block,
memory_region_name(mr), dev);
+ qemu_ram_set_migratable(mr->ram_block);
}
void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
{
qemu_ram_unset_idstr(mr->ram_block);
+ qemu_ram_unset_migratable(mr->ram_block);
}
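Only RAM registered through vmstate_register_ram() (directly, or via vmstate_register_ram_global()) is now marked migratable, so blocks that never receive an idstr are silently skipped by the migratable iterators. A minimal sketch of the usual device-side pairing (device and region names hypothetical):

    /* Hypothetical device init: the second call sets RAM_MIGRATABLE. */
    memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram", size,
                           &error_fatal);
    vmstate_register_ram(&s->ram, DEVICE(dev));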
void vmstate_register_ram_global(MemoryRegion *mr)
qemu_rdma_accept_pin_verbsc(void *verbs) "Verbs context after listen: %p"
qemu_rdma_block_for_wrid_miss(const char *wcompstr, int wcomp, const char *gcompstr, uint64_t req) "Wanted wrid %s (%d) but got %s (%" PRIu64 ")"
qemu_rdma_cleanup_disconnect(void) ""
-qemu_rdma_cleanup_waiting_for_disconnect(void) ""
qemu_rdma_close(void) ""
qemu_rdma_connect_pin_all_requested(void) ""
qemu_rdma_connect_pin_all_outcome(bool pin) "%d"
# @postcopy-blocktime: Calculate downtime for postcopy live migration
# (since 3.0)
#
+# @late-block-activate: If enabled, the destination will not activate block
+# devices (and thus take locks) immediately at the end of migration.
+# (since 3.0)
+#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
'block', 'return-path', 'pause-before-switchover', 'x-multifd',
- 'dirty-bitmaps', 'postcopy-blocktime' ] }
+ 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate' ] }
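For reference, the capability is enabled on the destination with the usual migrate-set-capabilities command before the incoming migration starts; a sketch:

    { "execute": "migrate-set-capabilities",
      "arguments": { "capabilities": [
          { "capability": "late-block-activate", "state": true } ] } }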
##
# @MigrationCapabilityStatus: