.version_id = 1,
.minimum_version_id = 1,
.needed = vmstate_target_page_bits_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32(target_page_bits, SaveState),
VMSTATE_END_OF_LIST()
}
.version_id = 1,
.minimum_version_id = 1,
.needed = vmstate_capabilites_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32_V(caps_count, SaveState, 1),
VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1,
vmstate_info_capability,
static int vmstate_uuid_post_load(void *opaque, int version_id)
{
SaveState *state = opaque;
- char uuid_src[UUID_FMT_LEN + 1];
- char uuid_dst[UUID_FMT_LEN + 1];
+ char uuid_src[UUID_STR_LEN];
+ char uuid_dst[UUID_STR_LEN];
if (!qemu_uuid_set) {
/*
.minimum_version_id = 1,
.needed = vmstate_uuid_needed,
.post_load = vmstate_uuid_post_load,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1),
VMSTATE_END_OF_LIST()
}
.post_load = configuration_post_load,
.pre_save = configuration_pre_save,
.post_save = configuration_post_save,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32(len, SaveState),
VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription *[]) {
+ .subsections = (const VMStateDescription * const []) {
&vmstate_target_page_bits,
&vmstate_capabilites,
&vmstate_uuid,
}
+/*
+ * Print one vmstate subsection to @out_file at the given indent.
+ *
+ * The patch changes the parameter from a pointer into the subsection
+ * array (VMStateDescription **) to the subsection itself, matching the
+ * now-const subsection lists; the caller dereferences instead.
+ * A NULL entry is silently skipped.
+ */
static void dump_vmstate_vmss(FILE *out_file,
- const VMStateDescription **subsection,
+ const VMStateDescription *subsection,
int indent)
{
- if (*subsection != NULL) {
- dump_vmstate_vmsd(out_file, *subsection, indent, true);
+ if (subsection != NULL) {
+ dump_vmstate_vmsd(out_file, subsection, indent, true);
}
}
fprintf(out_file, "\n%*s]", indent, "");
}
if (vmsd->subsections != NULL) {
- const VMStateDescription **subsection = vmsd->subsections;
+ const VMStateDescription * const *subsection = vmsd->subsections;
bool first;
fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, "");
if (!first) {
fprintf(out_file, ",\n");
}
- dump_vmstate_vmss(out_file, subsection, indent + 2);
+ dump_vmstate_vmss(out_file, *subsection, indent + 2);
subsection++;
first = false;
}
static void vmstate_check(const VMStateDescription *vmsd)
{
const VMStateField *field = vmsd->fields;
- const VMStateDescription **subsection = vmsd->subsections;
+ const VMStateDescription * const *subsection = vmsd->subsections;
if (field) {
while (field->name) {
qemu_bh_delete(mis->bh);
trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started");
+ object_unref(OBJECT(migration_get_current()));
}
/* After all discards we can start running and asking for pages */
postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
+ object_ref(OBJECT(migration_get_current()));
qemu_bh_schedule(mis->bh);
/* We need to finish reading the stream from the package
QEMUSnapshotInfo sn1, *sn = &sn1;
int ret = -1, ret2;
QEMUFile *f;
- int saved_vm_running;
+ RunState saved_state = runstate_get();
uint64_t vm_state_size;
g_autoptr(GDateTime) now = g_date_time_new_now_local();
- AioContext *aio_context;
GLOBAL_STATE_CODE();
if (bs == NULL) {
return false;
}
- aio_context = bdrv_get_aio_context(bs);
-
- saved_vm_running = runstate_is_running();
global_state_store();
vm_stop(RUN_STATE_SAVE_VM);
bdrv_drain_all_begin();
- aio_context_acquire(aio_context);
-
memset(sn, 0, sizeof(*sn));
/* fill auxiliary fields */
goto the_end;
}
- /* The bdrv_all_create_snapshot() call that follows acquires the AioContext
- * for itself. BDRV_POLL_WHILE() does not support nested locking because
- * it only releases the lock once. Therefore synchronous I/O will deadlock
- * unless we release the AioContext before bdrv_all_create_snapshot().
- */
- aio_context_release(aio_context);
- aio_context = NULL;
-
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
has_devices, devices, errp);
if (ret < 0) {
ret = 0;
the_end:
- if (aio_context) {
- aio_context_release(aio_context);
- }
-
bdrv_drain_all_end();
- if (saved_vm_running) {
- vm_start();
- }
+ vm_resume(saved_state);
return ret == 0;
}
QEMUSnapshotInfo sn;
QEMUFile *f;
int ret;
- AioContext *aio_context;
MigrationIncomingState *mis = migration_incoming_get_current();
if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
if (!bs_vm_state) {
return false;
}
- aio_context = bdrv_get_aio_context(bs_vm_state);
/* Don't even try to load empty VM states */
- aio_context_acquire(aio_context);
ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
- aio_context_release(aio_context);
if (ret < 0) {
return false;
} else if (sn.vm_state_size == 0) {
ret = -EINVAL;
goto err_drain;
}
- aio_context_acquire(aio_context);
ret = qemu_loadvm_state(f);
migration_incoming_state_destroy();
- aio_context_release(aio_context);
bdrv_drain_all_end();
return false;
}
+/*
+ * Resume the VM after a snapshot load, restoring the run state @state
+ * that was captured before the load.
+ *
+ * If the caller wanted the guest running but the loaded snapshot left
+ * the machine suspended (presumably it was taken while suspended --
+ * confirm against save-side behavior), issue a wakeup request so the
+ * guest actually resumes execution rather than staying suspended.
+ */
+void load_snapshot_resume(RunState state)
+{
+ vm_resume(state);
+ if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) {
+ qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort);
+ }
+}
+
bool delete_snapshot(const char *name, bool has_devices,
strList *devices, Error **errp)
{
{
Job *job = opaque;
SnapshotJob *s = container_of(job, SnapshotJob, common);
- int orig_vm_running;
+ RunState orig_state = runstate_get();
job_progress_set_remaining(&s->common, 1);
- orig_vm_running = runstate_is_running();
vm_stop(RUN_STATE_RESTORE_VM);
s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
- if (s->ret && orig_vm_running) {
- vm_start();
+ if (s->ret) {
+ load_snapshot_resume(orig_state);
}
job_progress_update(&s->common, 1);