use strict;
use warnings;
+
use POSIX;
use IO::Handle;
use IO::Select;
optional => 1,
});
-PVE::JSONSchema::register_standard_option('pve-snapshot-name', {
- description => "The name of the snapshot.",
- type => 'string', format => 'pve-configid',
- maxLength => 40,
-});
-
PVE::JSONSchema::register_standard_option('pve-qm-image-format', {
type => 'string',
enum => [qw(raw cow qcow qed qcow2 vmdk cloop)],
optional => 1,
type => 'string',
description => "Lock/unlock the VM.",
- enum => [qw(backup clone create migrate rollback snapshot snapshot-delete)],
+ enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
},
cpulimit => {
optional => 1,
__EOD__
my $net_fmt = {
- macaddr => {
- type => 'string',
- pattern => qr/[0-9a-f]{2}(?::[0-9a-f]{2}){5}/i,
+ macaddr => get_standard_option('mac-addr', {
description => "MAC address. That address must be unique withing your network. This is automatically generated if not specified.",
- format_description => "XX:XX:XX:XX:XX:XX",
- optional => 1,
- },
+ }),
model => {
type => 'string',
description => "Network Card Model. The 'virtio' model provides the best performance with very low CPU overhead. If your guest does not support this driver, it is usually best to use 'e1000'.",
type => 'number',
optional => 1,
},
+ lock => {
+ description => "The current config lock, if any.",
+ type => 'string',
+ optional => 1,
+ }
};
my $last_proc_pid_stat;
$d->{template} = PVE::QemuConfig->is_template($conf);
$d->{serial} = 1 if conf_has_serial($conf);
+ $d->{lock} = $conf->{lock} if $conf->{lock};
$res->{$vmid} = $d;
}
push @$cmd, '-global', join(',', @$globalFlags)
if scalar(@$globalFlags);
+ if (my $vmstate = $conf->{vmstate}) {
+ my $statepath = PVE::Storage::path($storecfg, $vmstate);
+ PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+ push @$cmd, '-loadstate', $statepath;
+ }
+
# add custom args
if ($conf->{args}) {
my $aa = PVE::Tools::split_args($conf->{args});
sub qemu_iothread_del {
my($conf, $vmid, $deviceid) = @_;
- my $device = parse_drive($deviceid, $conf->{$deviceid});
+ # iothread objects for virtio-scsi controllers are named after the
+ # controller id (virtioscsiN/scsihwN), but the drive itself lives in the
+ # config under the 'scsiN' key — map the device id back to the config
+ # key before parsing the drive definition.
+ my $confid = $deviceid;
+ if ($deviceid =~ m/^(?:virtioscsi|scsihw)(\d+)$/) {
+ $confid = 'scsi' . $1;
+ }
+ my $device = parse_drive($confid, $conf->{$confid});
if ($device->{iothread}) {
my $iothreads = vm_iothreads_list($vmid);
qemu_objectdel($vmid, "iothread-$deviceid") if $iothreads->{"iothread-$deviceid"};
die "you can't start a vm if it's a template\n" if PVE::QemuConfig->is_template($conf);
- PVE::QemuConfig->check_lock($conf) if !$skiplock;
+ # a VM that was suspended to disk carries the 'suspended' config lock;
+ # starting it is the legitimate resume path, so don't fail the lock check
+ my $is_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+
+ PVE::QemuConfig->check_lock($conf)
+ if !($skiplock || $is_suspended);
die "VM $vmid already running\n" if check_running($vmid, undef, $migratedfrom);
PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'pre-start', 1);
+ if ($is_suspended) {
+ # enforce machine type on suspended vm to ensure HW compatibility
+ $forcemachine = $conf->{runningmachine};
+ print "Resuming suspended VM\n";
+ }
+
my ($cmd, $vollist, $spice_port) = config_to_command($storecfg, $vmid, $conf, $defaults, $forcemachine);
my $migrate_port = 0;
my $cpuunits = defined($conf->{cpuunits}) ? $conf->{cpuunits}
: $defaults->{cpuunits};
- my $start_timeout = $conf->{hugepages} ? 300 : 30;
+ # loading a saved vm state from disk can take a while — allow more time
+ my $start_timeout = ($conf->{hugepages} || $is_suspended) ? 300 : 30;
my %run_params = (timeout => $statefile ? undef : $start_timeout, umask => 0077);
my %properties = (
property => "guest-stats-polling-interval",
value => 2) if (!defined($conf->{balloon}) || $conf->{balloon});
+ # resume succeeded: drop the 'suspended' lock and free the state volume
+ if ($is_suspended && (my $vmstate = $conf->{vmstate})) {
+ print "Resumed VM, removing state\n";
+ delete $conf->@{qw(lock vmstate runningmachine)};
+ PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+ PVE::Storage::vdisk_free($storecfg, $vmstate);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+
PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'post-start');
});
}
}
sub vm_suspend {
- my ($vmid, $skiplock) = @_;
+ # $includestate: suspend-to-disk (save full vm state) instead of a plain
+ # monitor 'stop'; $statestorage optionally overrides where the state goes.
+ my ($vmid, $skiplock, $includestate, $statestorage) = @_;
+
+ # shared between the locked config phase and the savevm phase below
+ my $conf;
+ my $path;
+ my $storecfg;
+ my $vmstate;
PVE::QemuConfig->lock_config($vmid, sub {
- my $conf = PVE::QemuConfig->load_config($vmid);
+ $conf = PVE::QemuConfig->load_config($vmid);
+ my $is_backing_up = PVE::QemuConfig->has_lock($conf, 'backup');
PVE::QemuConfig->check_lock($conf)
- if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup'));
+ if !($skiplock || $is_backing_up);
+
+ die "cannot suspend to disk during backup\n"
+ if $is_backing_up && $includestate;
- vm_mon_cmd($vmid, "stop");
+ # phase 1 (config lock held): allocate the state volume and mark the
+ # config with the transient 'suspending' lock before any state is written
+ if ($includestate) {
+ $conf->{lock} = 'suspending';
+ my $date = strftime("%Y-%m-%d", localtime(time()));
+ $storecfg = PVE::Storage::config();
+ $vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, $statestorage, 1);
+ $path = PVE::Storage::path($storecfg, $vmstate);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ vm_mon_cmd($vmid, "stop");
+ }
});
+
+ # phase 2 (no config lock held): stream the vm state to the volume and
+ # poll qemu until the savevm job finishes
+ if ($includestate) {
+ # save vm state
+ PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+
+ eval {
+ vm_mon_cmd($vmid, "savevm-start", statefile => $path);
+ for(;;) {
+ my $state = vm_mon_cmd_nocheck($vmid, "query-savevm");
+ if (!$state->{status}) {
+ die "savevm not active\n";
+ } elsif ($state->{status} eq 'active') {
+ sleep(1);
+ next;
+ } elsif ($state->{status} eq 'completed') {
+ print "State saved, quitting\n";
+ last;
+ } elsif ($state->{status} eq 'failed' && $state->{error}) {
+ die "query-savevm failed with error '$state->{error}'\n"
+ } else {
+ die "query-savevm returned status '$state->{status}'\n";
+ }
+ }
+ };
+ my $err = $@;
+
+ PVE::QemuConfig->lock_config($vmid, sub {
+ $conf = PVE::QemuConfig->load_config($vmid);
+ if ($err) {
+ # cleanup, but leave suspending lock, to indicate something went wrong
+ eval {
+ vm_mon_cmd($vmid, "savevm-end");
+ PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+ PVE::Storage::vdisk_free($storecfg, $vmstate);
+ delete $conf->@{qw(vmstate runningmachine)};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ };
+ warn $@ if $@;
+ die $err;
+ }
+
+ # only finalize if nobody changed the lock while we were saving
+ die "lock changed unexpectedly\n"
+ if !PVE::QemuConfig->has_lock($conf, 'suspending');
+
+ # state is on disk: terminate qemu and promote the lock to 'suspended'
+ vm_qmp_command($vmid, { execute => "quit" });
+ $conf->{lock} = 'suspended';
+ PVE::QemuConfig->write_config($vmid, $conf);
+ });
+ }
}
sub vm_resume {
});
}
+# Translate an 'iscsi://portal/target/lun' pseudo-path into the structured
+# option string qemu-img expects with --image-opts / --target-image-opts
+# (raw driver over the iscsi transport, using this node's initiator name).
+# Dies when the path does not match the iscsi URL format.
+sub convert_iscsi_path {
+ my ($path) = @_;
+
+ if ($path =~ m|^iscsi://([^/]+)/([^/]+)/(.+)$|) {
+ my $portal = $1;
+ my $target = $2;
+ my $lun = $3;
+
+ my $initiator_name = get_initiator_name();
+
+ return "file.driver=iscsi,file.transport=tcp,file.initiator-name=$initiator_name,".
+ "file.portal=$portal,file.target=$target,file.lun=$lun,driver=raw";
+ }
+
+ die "cannot convert iscsi path '$path', unknown format\n";
+}
+
sub qemu_img_convert {
my ($src_volid, $dst_volid, $size, $snapname, $is_zero_initialized) = @_;
my $src_path = PVE::Storage::path($storecfg, $src_volid, $snapname);
my $dst_path = PVE::Storage::path($storecfg, $dst_volid);
+ my $src_is_iscsi = ($src_path =~ m|^iscsi://|);
+ my $dst_is_iscsi = ($dst_path =~ m|^iscsi://|);
+
my $cmd = [];
push @$cmd, '/usr/bin/qemu-img', 'convert', '-p', '-n';
push @$cmd, '-l', "snapshot.name=$snapname" if($snapname && $src_format eq "qcow2");
push @$cmd, '-t', 'none' if $dst_scfg->{type} eq 'zfspool';
push @$cmd, '-T', 'none' if $src_scfg->{type} eq 'zfspool';
- push @$cmd, '-f', $src_format, '-O', $dst_format, $src_path;
- if ($is_zero_initialized) {
+
+ # iscsi:// pseudo-paths cannot be passed to qemu-img as plain file names;
+ # hand them over as structured --image-opts strings instead of -f/-O formats
+ if ($src_is_iscsi) {
+ push @$cmd, '--image-opts';
+ $src_path = convert_iscsi_path($src_path);
+ } else {
+ push @$cmd, '-f', $src_format;
+ }
+
+ if ($dst_is_iscsi) {
+ push @$cmd, '--target-image-opts';
+ $dst_path = convert_iscsi_path($dst_path);
+ } else {
+ push @$cmd, '-O', $dst_format;
+ }
+
+ push @$cmd, $src_path;
+
+ # the zeroinit: filter protocol only applies to non-iscsi targets
+ if (!$dst_is_iscsi && $is_zero_initialized) {
+ push @$cmd, "zeroinit:$dst_path";
+ } else {
+ push @$cmd, $dst_path;
}
sub qemu_drive_mirror {
- my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga) = @_;
+ # $bwlimit: optional bandwidth limit in KB/s, passed through to qemu's
+ # drive-mirror 'speed' argument (which expects bytes per second)
+ my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
$jobs = {} if !$jobs;
my $opts = { timeout => 10, device => "drive-$drive", mode => "existing", sync => "full", target => $qemu_target };
$opts->{format} = $format if $format;
- print "drive mirror is starting for drive-$drive\n";
-
- eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); }; #if a job already run for this device,it's throw an error
+ # convert KB/s -> bytes/s for qemu; no 'speed' key means unlimited
+ if (defined($bwlimit)) {
+ $opts->{speed} = $bwlimit * 1024;
+ print "drive mirror is starting for drive-$drive with bandwidth limit: ${bwlimit}KB/s\n";
+ } else {
+ print "drive mirror is starting for drive-$drive\n";
+ }
+ # if a job already runs for this device we get an error, catch it for cleanup
+ eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); };
if (my $err = $@) {
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
- die "mirroring error: $err";
+ warn "$@\n" if $@;
+ die "mirroring error: $err\n";
}
qemu_drive_mirror_monitor ($vmid, $vmiddst, $jobs, $skipcomplete, $qga);
sub clone_disk {
my ($storecfg, $vmid, $running, $drivename, $drive, $snapname,
- $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga) = @_;
+ $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
my $newvolid;
my $sparseinit = PVE::Storage::volume_has_feature($storecfg, 'sparseinit', $newvolid);
if (!$running || $snapname) {
+ # TODO: handle bwlimits
qemu_img_convert($drive->{file}, $newvolid, $size, $snapname, $sparseinit);
} else {
if $drive->{iothread};
}
+ # live clone: $bwlimit (KB/s) is only enforced on this mirror path,
+ # not on the offline qemu_img_convert path above
- qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga);
+ qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga, $bwlimit);
}
}