git.proxmox.com Git - qemu-server.git/commitdiff
implement suspend to disk for running vms
author     Dominik Csapak <d.csapak@proxmox.com>
           Thu, 14 Mar 2019 16:04:47 +0000 (17:04 +0100)
committer  Thomas Lamprecht <t.lamprecht@proxmox.com>
           Tue, 19 Mar 2019 07:53:14 +0000 (08:53 +0100)
the idea is to use the same logic as with snapshots, but without
snapshotting the disks: after saving the vm state (incl. memory),
we hard shut off the guest.

this way the guest will not touch the disks anymore.

to prevent any alteration of the vm (incl. migration, hw changes, etc.) we
add the config locks 'suspending' (while the state is written) and 'suspended'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
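
For orientation, a minimal usage sketch (not part of the commit): how the new
state-preserving suspend could be driven through the reworked vm_suspend(),
assuming an example vmid of 100:

    use PVE::QemuServer;

    my $vmid = 100;                            # example VM id
    # third argument ($includestate) enables suspend to disk
    PVE::QemuServer::vm_suspend($vmid, 0, 1);
    # afterwards the guest process has been quit and the config carries
    # lock 'suspended' plus the vmstate/runningmachine entries needed
    # to restore the guest later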
PVE/QemuConfig.pm
PVE/QemuServer.pm

index 61a018e2222f1e0179b753afd06ffe2bf1af4d9f..6693585016fb7f53ce485d4aa8940aee6a85f581 100644 (file)
@@ -116,9 +116,7 @@ sub get_replicatable_volumes {
 }
 
 sub __snapshot_save_vmstate {
-    my ($class, $vmid, $conf, $snapname, $storecfg) = @_;
-
-    my $snap = $conf->{snapshots}->{$snapname};
+    my ($class, $vmid, $conf, $snapname, $storecfg, $suspend) = @_;
 
     # first, use explicitly configured storage
     my $target = $conf->{vmstatestorage};
@@ -140,12 +138,24 @@ sub __snapshot_save_vmstate {
 
 my $driver_state_size = 500; # assume 500MB is enough to save all driver state;
     my $size = $conf->{memory}*2 + $driver_state_size;
+    my $scfg = PVE::Storage::storage_config($storecfg, $target);
 
     my $name = "vm-$vmid-state-$snapname";
-    my $scfg = PVE::Storage::storage_config($storecfg, $target);
     $name .= ".raw" if $scfg->{path}; # add filename extension for file base storage
-    $snap->{vmstate} = PVE::Storage::vdisk_alloc($storecfg, $target, $vmid, 'raw', $name, $size*1024);
-    $snap->{runningmachine} = PVE::QemuServer::get_current_qemu_machine($vmid);
+
+    my $statefile = PVE::Storage::vdisk_alloc($storecfg, $target, $vmid, 'raw', $name, $size*1024);
+    my $runningmachine = PVE::QemuServer::get_current_qemu_machine($vmid);
+
+    if ($suspend) {
+       $conf->{vmstate} = $statefile;
+       $conf->{runningmachine} = $runningmachine;
+    } else {
+       my $snap = $conf->{snapshots}->{$snapname};
+       $snap->{vmstate} = $statefile;
+       $snap->{runningmachine} = $runningmachine;
+    }
+
+    return $statefile;
 }
 
 sub __snapshot_check_running {
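
For context, a minimal sketch of the two call modes of the reworked helper;
$vmid, $conf, $storecfg and $date are assumed to be set up as in the callers,
and 'mysnap' is just a placeholder snapshot name:

    # snapshot mode: vmstate/runningmachine are recorded in the snapshot section
    PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, 'mysnap', $storecfg);

    # suspend mode: vmstate/runningmachine go into the top-level config, and the
    # allocated state volume id is returned to the caller
    my $vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, 1);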
index deac8e2d0f11d4c48d252671fed4b923af6e56f9..1f12f37f5c6e660a9b53aa1304feff13d0d7b573 100644 (file)
@@ -300,7 +300,7 @@ my $confdesc = {
        optional => 1,
        type => 'string',
        description => "Lock/unlock the VM.",
-       enum => [qw(backup clone create migrate rollback snapshot snapshot-delete)],
+       enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
     },
     cpulimit => {
        optional => 1,
@@ -5658,17 +5658,83 @@ sub vm_stop {
 }
 
 sub vm_suspend {
-    my ($vmid, $skiplock) = @_;
+    my ($vmid, $skiplock, $includestate) = @_;
+
+    my $conf;
+    my $path;
+    my $storecfg;
+    my $vmstate;
 
     PVE::QemuConfig->lock_config($vmid, sub {
 
-       my $conf = PVE::QemuConfig->load_config($vmid);
+       $conf = PVE::QemuConfig->load_config($vmid);
 
+       my $is_backing_up = PVE::QemuConfig->has_lock($conf, 'backup');
        PVE::QemuConfig->check_lock($conf)
-           if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup'));
+           if !($skiplock || $is_backing_up);
 
-       vm_mon_cmd($vmid, "stop");
+       die "cannot suspend to disk during backup\n"
+           if $is_backing_up && $includestate;
+
+       if ($includestate) {
+           $conf->{lock} = 'suspending';
+           my $date = strftime("%Y-%m-%d", localtime(time()));
+           $storecfg = PVE::Storage::config();
+           $vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, 1);
+           $path = PVE::Storage::path($storecfg, $vmstate);
+           PVE::QemuConfig->write_config($vmid, $conf);
+       } else {
+           vm_mon_cmd($vmid, "stop");
+       }
     });
+
+    if ($includestate) {
+       # save vm state
+       PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+
+       eval {
+           vm_mon_cmd($vmid, "savevm-start", statefile => $path);
+           for(;;) {
+               my $state = vm_mon_cmd_nocheck($vmid, "query-savevm");
+               if (!$state->{status}) {
+                   die "savevm not active\n";
+               } elsif ($state->{status} eq 'active') {
+                   sleep(1);
+                   next;
+               } elsif ($state->{status} eq 'completed') {
+                   last;
+               } elsif ($state->{status} eq 'failed' && $state->{error}) {
+                   die "query-savevm failed with error '$state->{error}'\n"
+               } else {
+                   die "query-savevm returned status '$state->{status}'\n";
+               }
+           }
+       };
+       my $err = $@;
+
+       PVE::QemuConfig->lock_config($vmid, sub {
+           $conf = PVE::QemuConfig->load_config($vmid);
+           if ($err) {
+               # cleanup, but leave suspending lock, to indicate something went wrong
+               eval {
+                   vm_mon_cmd($vmid, "savevm-end");
+                   PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+                   PVE::Storage::vdisk_free($storecfg, $vmstate);
+                   delete $conf->@{qw(vmstate runningmachine)};
+                   PVE::QemuConfig->write_config($vmid, $conf);
+               };
+               warn $@ if $@;
+               die $err;
+           }
+
+           die "lock changed unexpectedly\n"
+               if !PVE::QemuConfig->has_lock($conf, 'suspending');
+
+           vm_qmp_command($vmid, { execute => "quit" });
+           $conf->{lock} = 'suspended';
+           PVE::QemuConfig->write_config($vmid, $conf);
+       });
+    }
 }
 
 sub vm_resume {
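
Purely as an illustration of the end result (volume id and machine type below
are made-up example values), the VM config after a successful suspend to disk
would carry entries along these lines:

    # <vmid>.conf excerpt after suspend to disk (illustrative values)
    lock: suspended
    runningmachine: pc-i440fx-3.0
    vmstate: local-lvm:vm-100-state-suspend-2019-03-14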