use PVE::Exception qw(raise raise_param_exc);
use PVE::Storage;
use PVE::Tools qw(run_command lock_file file_read_firstline);
+use PVE::JSONSchema qw(get_standard_option);
use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file cfs_lock_file);
use PVE::INotify;
use PVE::ProcFSTools;
enum => [ qw(486 athlon pentium pentium2 pentium3 coreduo core2duo kvm32 kvm64 qemu32 qemu64 phenom cpu64-rhel6 cpu64-rhel5 Conroe Penryn Nehalem Westmere Opteron_G1 Opteron_G2 Opteron_G3 host) ],
default => 'qemu64',
},
+ parent => get_standard_option('pve-snapshot-name', {
+ optional => 1,
+ description => "Parent snapshot name. This is used internally, and should not be modified.",
+ }),
+ snaptime => {
+ optional => 1,
+ description => "Timestamp for snapshots.",
+ type => 'integer',
+ minimum => 0,
+ },
+ vmstate => {
+ optional => 1,
+ type => 'string', format => 'pve-volume-id',
+ description => "Reference to a volume which stores the VM state. This is used internally for snapshots.",
+ },
};
# what about other qemu settings ?
my $prop = shift;
foreach my $opt (keys %$confdesc) {
+ next if $opt eq 'parent' || $opt eq 'snaptime' || $opt eq 'vmstate';
$prop->{$opt} = $confdesc->{$opt};
}
if ($line =~ m/^\[([a-z][a-z0-9_\-]+)\]\s*$/i) {
my $snapname = $1;
$conf->{description} = $descr if $descr;
- my $descr = '';
+ $descr = '';
$conf = $res->{snapshots}->{$snapname} = {};
next;
}
if ($line =~ m/^(description):\s*(.*\S)\s*$/) {
$descr .= PVE::Tools::decode_text($2);
- } elsif ($line =~ m/parent:\s*([a-z][a-z0-9_\-]+)\s*$/) {
- $conf->{parent} = $1;
} elsif ($line =~ m/snapstate:\s*(prepare|delete)\s*$/) {
$conf->{snapstate} = $1;
} elsif ($line =~ m/^(args):\s*(.*\S)\s*$/) {
delete $conf->{smp};
}
- # fixme: unused drives and snapshots??!!
+ my $used_volids = {};
- my $new_volids = {};
- foreach my $key (keys %$conf) {
- next if $key eq 'digest' || $key eq 'description' || $key eq 'snapshots';
- my $value = $conf->{$key};
- eval { $value = check_type($key, $value); };
- die "unable to parse value of '$key' - $@" if $@;
+ my $cleanup_config = sub {
+ my ($cref) = @_;
+
+ foreach my $key (keys %$cref) {
+ next if $key eq 'digest' || $key eq 'description' || $key eq 'snapshots' ||
+ $key eq 'snapstate';
+ my $value = $cref->{$key};
+ eval { $value = check_type($key, $value); };
+ die "unable to parse value of '$key' - $@" if $@;
- $conf->{$key} = $value;
+ $cref->{$key} = $value;
- if (valid_drivename($key)) {
- my $drive = PVE::QemuServer::parse_drive($key, $value);
- $new_volids->{$drive->{file}} = 1 if $drive && $drive->{file};
+ if (valid_drivename($key)) {
+ my $drive = PVE::QemuServer::parse_drive($key, $value);
+ $used_volids->{$drive->{file}} = 1 if $drive && $drive->{file};
+ }
}
+ };
+
+ &$cleanup_config($conf);
+ foreach my $snapname (keys %{$conf->{snapshots}}) {
+ &$cleanup_config($conf->{snapshots}->{$snapname});
}
# remove 'unusedX' settings if we re-add a volume
foreach my $key (keys %$conf) {
my $value = $conf->{$key};
- if ($key =~ m/^unused/ && $new_volids->{$value}) {
+ if ($key =~ m/^unused/ && $used_volids->{$value}) {
delete $conf->{$key};
}
}
-
+
my $generate_raw_config = sub {
my ($conf) = @_;
# Delete a drive snapshot on the storage, then tell a running qemu to
# drop its reference to it via the monitor.
#   $vmid     - VM id
#   $deviceid - qemu device name (e.g. "drive-virtio0")
#   $storecfg - storage configuration
#   $volid    - volume id of the drive
#   $snap     - snapshot name
sub qemu_volume_snapshot_delete {
    my ($vmid, $deviceid, $storecfg, $volid, $snap) = @_;

    my $running = PVE::QemuServer::check_running($vmid);

    # Storage-side deletion first; if the storage layer reports nothing
    # was removed there is no qemu-side reference to clean up either.
    my $deleted = PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap, $running);

    # The monitor command is only needed (and only possible) while the
    # VM is running.
    vm_mon_cmd($vmid, "delete-drive-snapshot", device => $deviceid, name => $snap)
        if $deleted && $running;
}
sub qga_freezefs {
if ($statefile eq 'tcp') {
print "migration listens on port $migrate_port\n";
} else {
- unlink $statefile;
- # fixme: send resume - is that necessary ?
- eval { vm_mon_cmd($vmid, "cont"); };
+ if ($migratedfrom) {
+ unlink $statefile;
+ # fixme: send resume - is that necessary ?
+ eval { vm_mon_cmd($vmid, "cont"); };
+ }
}
}
foreach my $k (keys %$source) {
next if $k eq 'snapshots';
+ next if $k eq 'snapstate';
+ next if $k eq 'snaptime';
+ next if $k eq 'vmstate';
next if $k eq 'lock';
next if $k eq 'digest';
+ next if $k eq 'description';
next if $k =~ m/^unused\d+$/;
$dest->{$k} = $source->{$k};
snapshots => $conf->{snapshots},
};
- # keep list of unused disks
+ # keep description and list of unused disks
foreach my $k (keys %$conf) {
- next if $k !~ m/^unused\d+$/;
+ next if !($k =~ m/^unused\d+$/ || $k eq 'description');
$newconf->{$k} = $conf->{$k};
}
return $newconf;
};
# Invoke $func once for every storage ID referenced by a writable
# (non-CDROM) drive in $conf, in sorted order.  Each storage is visited
# exactly once even when several drives live on it.
sub foreach_writable_storage {
    my ($conf, $func) = @_;

    my %seen;

    for my $ds (keys %$conf) {
        next if !valid_drivename($ds);

        my $drive = parse_drive($ds, $conf->{$ds});
        next if !$drive || drive_is_cdrom($drive);

        # parse_volume_id() in no-die mode yields no storage ID for
        # plain paths, so those entries are silently skipped.
        my ($sid) = PVE::Storage::parse_volume_id($drive->{file}, 1);
        $seen{$sid} = 1 if $sid;
    }

    $func->($_) for sort keys %seen;
}
+
# Allocate a raw volume to hold the VM state for snapshot $snapname.
# Storage selection is "smart": shared storage is preferred over local,
# and file-based storage over block-based within each group; 'local' is
# the last resort.  Returns the new volume ID.
my $alloc_vmstate_volid = sub {
    my ($storecfg, $vmid, $conf, $snapname) = @_;

    # One search pass, parameterized by whether we want shared storage.
    my $pick_storage = sub {
        my ($want_shared) = @_;
        my $best;
        foreach_writable_storage($conf, sub {
            my ($sid) = @_;
            my $scfg = PVE::Storage::storage_config($storecfg, $sid);
            return if ($scfg->{shared} ? 1 : 0) != ($want_shared ? 1 : 0);
            $best = $sid if !$best || $scfg->{path}; # prefer file based storage
        });
        return $best;
    };

    # Shared storage first, then local, then fall back to 'local'.
    my $target = $pick_storage->(1) || $pick_storage->(0) || 'local';

    # Assume 32MB is enough to save all driver state; guest memory
    # (configured in MB) dominates the state size.
    my $size = $conf->{memory} + 32;

    my $name = "vm-$vmid-state-$snapname";
    my $scfg = PVE::Storage::storage_config($storecfg, $target);
    $name .= ".raw" if $scfg->{path}; # add filename extension for file based storage

    # vdisk_alloc takes the size in KB, hence the *1024 on the MB value.
    return PVE::Storage::vdisk_alloc($storecfg, $target, $vmid, 'raw', $name, $size*1024);
};
+
my $snapshot_prepare = sub {
- my ($vmid, $snapname) = @_;
+ my ($vmid, $snapname, $save_vmstate, $comment) = @_;
my $snap;
die "snapshot name '$snapname' already used\n"
if defined($conf->{snapshots}->{$snapname});
- # fixme: need to implement a check to see if all storages
- # support snapshots
+ my $storecfg = PVE::Storage::config();
- $snap = $conf->{snapshots}->{$snapname} = {
- snapstate => "prepare",
- };
+ foreach_drive($conf, sub {
+ my ($ds, $drive) = @_;
+
+ return if drive_is_cdrom($drive);
+ my $volid = $drive->{file};
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ if ($storeid) {
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ die "can't snapshot volume '$volid'\n"
+ if !(($scfg->{path} && $volname =~ m/\.qcow2$/) ||
+ ($scfg->{type} eq 'rbd') ||
+ ($scfg->{type} eq 'sheepdog'));
+ } elsif ($volid =~ m|^(/.+)$| && -e $volid) {
+ die "snapshot device '$volid' is not possible\n";
+ } else {
+ die "can't snapshot volume '$volid'\n";
+ }
+ });
+
+
+ $snap = $conf->{snapshots}->{$snapname} = {};
+
+ if ($save_vmstate && check_running($vmid)) {
+ $snap->{vmstate} = &$alloc_vmstate_volid($storecfg, $vmid, $conf, $snapname);
+ }
&$snapshot_copy_config($conf, $snap);
+ $snap->{snapstate} = "prepare";
+ $snap->{snaptime} = time();
+ $snap->{description} = $comment if $comment;
+
update_config_nolock($vmid, $conf, 1);
};
if !($snap->{snapstate} && $snap->{snapstate} eq "prepare");
delete $snap->{snapstate};
+ delete $conf->{lock};
my $newconf = &$snapshot_apply_config($conf, $snap);
+ $newconf->{parent} = $snapname;
+
update_config_nolock($vmid, $newconf, 1);
};
my $prepare = 1;
+ my $storecfg = PVE::Storage::config();
+
my $updatefn = sub {
my $conf = load_config($vmid);
- check_lock($conf) if $prepare;
+ if ($prepare) {
+ check_lock($conf);
+ vm_stop($storecfg, $vmid, undef, undef, 5, undef, undef);
+ }
die "unable to rollback vm $vmid: vm is running\n"
if check_running($vmid);
}
update_config_nolock($vmid, $conf, 1);
+
+ if (!$prepare && $snap->{vmstate}) {
+ my $statefile = PVE::Storage::path($storecfg, $snap->{vmstate});
+	# fixme: this only works for files currently
+ vm_start($storecfg, $vmid, $statefile);
+ }
+
};
lock_config($vmid, $updatefn);
-
- my $storecfg = PVE::Storage::config();
foreach_drive($snap, sub {
my ($ds, $drive) = @_;
my $volid = $drive->{file};
my $device = "drive-$ds";
- qemu_volume_snapshot_rollback($vmid, $device, $storecfg, $volid, $snapname);
+ PVE::Storage::volume_snapshot_rollback($storecfg, $volid, $snapname);
});
$prepare = 0;
}
sub snapshot_create {
- my ($vmid, $snapname, $vmstate, $freezefs) = @_;
+ my ($vmid, $snapname, $save_vmstate, $freezefs, $comment) = @_;
+
+ my $snap = &$snapshot_prepare($vmid, $snapname, $save_vmstate, $comment);
+
+ $freezefs = $save_vmstate = 0 if !$snap->{vmstate}; # vm is not running
- my $snap = &$snapshot_prepare($vmid, $snapname);
+ my $drivehash = {};
+
+ my $running = check_running($vmid);
eval {
# create internal snapshots of all drives
-
- qemu_snapshot_start($vmid, $snapname) if $vmstate;
-
- qga_freezefs($vmid) if $freezefs;
my $storecfg = PVE::Storage::config();
+
+ if ($running) {
+ if ($snap->{vmstate}) {
+ my $path = PVE::Storage::path($storecfg, $snap->{vmstate});
+ vm_mon_cmd($vmid, "snapshot-start", statefile => $path);
+ } else {
+ vm_mon_cmd($vmid, "snapshot-start");
+ }
+ };
+
+ qga_freezefs($vmid) if $running && $freezefs;
foreach_drive($snap, sub {
my ($ds, $drive) = @_;
my $device = "drive-$ds";
qemu_volume_snapshot($vmid, $device, $storecfg, $volid, $snapname);
+ $drivehash->{$ds} = 1;
});
};
my $err = $@;
- eval { gqa_unfreezefs($vmid) if $freezefs; };
+ eval { gqa_unfreezefs($vmid) if $running && $freezefs; };
warn $@ if $@;
- eval { qemu_snapshot_end($vmid) if $vmstate; };
+ eval { vm_mon_cmd($vmid, "snapshot-end") if $running; };
warn $@ if $@;
if ($err) {
warn "snapshot create failed: starting cleanup\n";
- eval { snapshot_delete($vmid, $snapname); };
+ eval { snapshot_delete($vmid, $snapname, 0, $drivehash); };
warn $@ if $@;
die $err;
}
&$snapshot_commit($vmid, $snapname);
}
+# Note: $drivehash is only set when called from snapshot_create.
sub snapshot_delete {
- my ($vmid, $snapname, $force) = @_;
+ my ($vmid, $snapname, $force, $drivehash) = @_;
my $prepare = 1;
my $snap;
+ my $unused = [];
+
+ my $unlink_parent = sub {
+ my ($confref, $new_parent) = @_;
+ if ($confref->{parent} && $confref->{parent} eq $snapname) {
+ if ($new_parent) {
+ $confref->{parent} = $new_parent;
+ } else {
+ delete $confref->{parent};
+ }
+ }
+ };
+
my $updatefn = sub {
+ my ($remove_drive) = @_;
my $conf = load_config($vmid);
- check_lock($conf) if !$force;
+ check_lock($conf) if !$drivehash;
$snap = $conf->{snapshots}->{$snapname};
die "snapshot '$snapname' does not exist\n" if !defined($snap);
# remove parent refs
+ &$unlink_parent($conf, $snap->{parent});
foreach my $sn (keys %{$conf->{snapshots}}) {
next if $sn eq $snapname;
- my $snapref = $conf->{snapshots}->{$sn};
- if ($snapref->{parent} && $snapref->{parent} eq $snapname) {
- if ($snap->{parent}) {
- $snapref->{parent} = $snap->{parent};
- } else {
- delete $snapref->{parent};
- }
+ &$unlink_parent($conf->{snapshots}->{$sn}, $snap->{parent});
+ }
+
+ if ($remove_drive) {
+ if ($remove_drive eq 'vmstate') {
+ delete $snap->{$remove_drive};
+ } else {
+ my $drive = parse_drive($remove_drive, $snap->{$remove_drive});
+ my $volid = $drive->{file};
+ delete $snap->{$remove_drive};
+ add_unused_volume($conf, $volid);
}
}
$snap->{snapstate} = 'delete';
} else {
delete $conf->{snapshots}->{$snapname};
+ delete $conf->{lock} if $drivehash;
+ foreach my $volid (@$unused) {
+ add_unused_volume($conf, $volid);
+ }
}
update_config_nolock($vmid, $conf, 1);
lock_config($vmid, $updatefn);
- # now remove all internal snapshots
+ # now remove vmstate file
my $storecfg = PVE::Storage::config();
- PVE::QemuServer::foreach_drive($snap, sub {
+ if ($snap->{vmstate}) {
+ eval { PVE::Storage::vdisk_free($storecfg, $snap->{vmstate}); };
+ if (my $err = $@) {
+ die $err if !$force;
+ warn $err;
+ }
+ # save changes (remove vmstate from snapshot)
+ lock_config($vmid, $updatefn, 'vmstate') if !$force;
+ };
+
+ # now remove all internal snapshots
+ foreach_drive($snap, sub {
my ($ds, $drive) = @_;
return if drive_is_cdrom($drive);
+
my $volid = $drive->{file};
my $device = "drive-$ds";
- qemu_volume_snapshot_delete($vmid, $device, $storecfg, $volid, $snapname);
+ if (!$drivehash || $drivehash->{$ds}) {
+ eval { qemu_volume_snapshot_delete($vmid, $device, $storecfg, $volid, $snapname); };
+ if (my $err = $@) {
+ die $err if !$force;
+ warn $err;
+ }
+ }
+
+	# save changes (remove drive from snapshot)
+ lock_config($vmid, $updatefn, $ds) if !$force;
+ push @$unused, $volid;
});
# now cleanup config