use strict;
use warnings;
+
use POSIX;
use IO::Handle;
use IO::Select;
optional => 1,
});
-PVE::JSONSchema::register_standard_option('pve-snapshot-name', {
- description => "The name of the snapshot.",
- type => 'string', format => 'pve-configid',
- maxLength => 40,
-});
-
PVE::JSONSchema::register_standard_option('pve-qm-image-format', {
type => 'string',
enum => [qw(raw cow qcow qed qcow2 vmdk cloop)],
optional => 1,
type => 'string',
description => "Lock/unlock the VM.",
- enum => [qw(backup clone create migrate rollback snapshot snapshot-delete)],
+ enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
},
cpulimit => {
optional => 1,
__EOD__
my $net_fmt = {
- macaddr => {
- type => 'string',
- pattern => qr/[0-9a-f]{2}(?::[0-9a-f]{2}){5}/i,
+ macaddr => get_standard_option('mac-addr', {
description => "MAC address. That address must be unique withing your network. This is automatically generated if not specified.",
- format_description => "XX:XX:XX:XX:XX:XX",
- optional => 1,
- },
+ }),
model => {
type => 'string',
description => "Network Card Model. The 'virtio' model provides the best performance with very low CPU overhead. If your guest does not support this driver, it is usually best to use 'e1000'.",
pattern => qr/$PCIRE(;$PCIRE)*/,
format_description => 'HOSTPCIID[;HOSTPCIID2...]',
description => <<EODESCR,
-Host PCI device pass through. The PCI ID of a host's PCI device or a list
+Host PCI device pass through. The PCI ID of a host's PCI device or a list
of PCI virtual functions of the host. HOSTPCIID syntax is:
'bus:dev.func' (hexadecimal numbers)
verbose_description => <<EODESCR,
Map host PCI devices into guest.
-NOTE: This option allows direct access to host hardware. So it is no longer
+NOTE: This option allows direct access to host hardware. So it is no longer
possible to migrate such machines - use with special care.
CAUTION: Experimental! User reported problems with this option.
my $path;
my $volid = $drive->{file};
my $format;
-
+
if (drive_is_cdrom($drive)) {
$path = get_iso_path($storecfg, $vmid, $volid);
} else {
sub check_local_resources {
my ($conf, $noerr) = @_;
# Collect the names of all config keys that bind this VM to the local
# host (passed-through USB/PCI devices, physical serial/parallel ports,
# ivshmem) and therefore prevent live migration.
# Changed from returning a plain boolean to returning an array ref of
# the offending keys, so callers can tell the user exactly which
# resources block migration; an empty array ref means "none found".
- my $loc_res = 0;
+ my @loc_res = ();
- $loc_res = 1 if $conf->{hostusb}; # old syntax
- $loc_res = 1 if $conf->{hostpci}; # old syntax
+ push @loc_res, "hostusb" if $conf->{hostusb}; # old syntax
+ push @loc_res, "hostpci" if $conf->{hostpci}; # old syntax
- $loc_res = 1 if $conf->{ivshmem};
+ push @loc_res, "ivshmem" if $conf->{ivshmem};
foreach my $k (keys %$conf) {
# usbN entries backed by 'spice' are not tied to this host
next if $k =~ m/^usb/ && ($conf->{$k} eq 'spice');
# sockets are safe: they will be recreated on the target side post-migrate
next if $k =~ m/^serial/ && ($conf->{$k} eq 'socket');
- $loc_res = 1 if $k =~ m/^(usb|hostpci|serial|parallel)\d+$/;
+ push @loc_res, $k if $k =~ m/^(usb|hostpci|serial|parallel)\d+$/;
}
# unless $noerr is set, keep the old "fail hard" behaviour
- die "VM uses local resources\n" if $loc_res && !$noerr;
+ die "VM uses local resources\n" if scalar @loc_res && !$noerr;
- return $loc_res;
+ return \@loc_res;
}
# check if used storages are available on all nodes (use by migrate)
type => 'number',
optional => 1,
},
+ lock => {
+ description => "The current config lock, if any.",
+ type => 'string',
+ optional => 1,
+ }
};
my $last_proc_pid_stat;
$d->{template} = PVE::QemuConfig->is_template($conf);
$d->{serial} = 1 if conf_has_serial($conf);
+ $d->{lock} = $conf->{lock} if $conf->{lock};
$res->{$vmid} = $d;
}
push @$cmd, get_cpu_options($conf, $arch, $kvm, $machine_type, $kvm_off, $kvmver, $winversion, $gpu_passthrough);
PVE::QemuServer::Memory::config($conf, $vmid, $sockets, $cores, $defaults, $hotplug_features, $cmd);
-
+
push @$cmd, '-S' if $conf->{freeze};
push @$cmd, '-k', $conf->{keyboard} if defined($conf->{keyboard});
my $queues = '';
if($conf->{scsihw} && $conf->{scsihw} eq "virtio-scsi-single" && $drive->{queues}){
$queues = ",num_queues=$drive->{queues}";
- }
+ }
push @$devices, '-device', "$scsihw_type,id=$controller_prefix$controller$pciaddr$iothread$queues" if !$scsicontroller->{$controller};
$scsicontroller->{$controller}=1;
push @$cmd, '-global', join(',', @$globalFlags)
if scalar(@$globalFlags);
+ if (my $vmstate = $conf->{vmstate}) {
+ my $statepath = PVE::Storage::path($storecfg, $vmstate);
+ PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+ push @$cmd, '-loadstate', $statepath;
+ }
+
# add custom args
if ($conf->{args}) {
my $aa = PVE::Tools::split_args($conf->{args});
die "you can't start a vm if it's a template\n" if PVE::QemuConfig->is_template($conf);
- PVE::QemuConfig->check_lock($conf) if !$skiplock;
+ my $is_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+
+ PVE::QemuConfig->check_lock($conf)
+ if !($skiplock || $is_suspended);
die "VM $vmid already running\n" if check_running($vmid, undef, $migratedfrom);
PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'pre-start', 1);
+ if ($is_suspended) {
+ # enforce machine type on suspended vm to ensure HW compatibility
+ $forcemachine = $conf->{runningmachine};
+ print "Resuming suspended VM\n";
+ }
+
my ($cmd, $vollist, $spice_port) = config_to_command($storecfg, $vmid, $conf, $defaults, $forcemachine);
my $migrate_port = 0;
my $cpuunits = defined($conf->{cpuunits}) ? $conf->{cpuunits}
: $defaults->{cpuunits};
- my $start_timeout = $conf->{hugepages} ? 300 : 30;
+ my $start_timeout = ($conf->{hugepages} || $is_suspended) ? 300 : 30;
my %run_params = (timeout => $statefile ? undef : $start_timeout, umask => 0077);
my %properties = (
property => "guest-stats-polling-interval",
value => 2) if (!defined($conf->{balloon}) || $conf->{balloon});
+ if ($is_suspended && (my $vmstate = $conf->{vmstate})) {
+ print "Resumed VM, removing state\n";
+ delete $conf->@{qw(lock vmstate runningmachine)};
+ PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+ PVE::Storage::vdisk_free($storecfg, $vmstate);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+
PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'post-start');
});
}
}
sub vm_suspend {
- my ($vmid, $skiplock) = @_;
+ my ($vmid, $skiplock, $includestate, $statestorage) = @_;
+
+ # Suspend a VM. Without $includestate this simply pauses the guest CPUs
+ # via the monitor "stop" command. With $includestate the complete VM
+ # state is written to a state volume (optionally placed on
+ # $statestorage) and the QEMU process is terminated afterwards
+ # ("suspend to disk" / hibernate). $skiplock bypasses the config-lock
+ # check for privileged callers.
+ # These are shared between the two lock_config phases below.
+ my $conf;
+ my $path;
+ my $storecfg;
+ my $vmstate;
# Phase 1, under the config lock: validate locks and, for suspend to
# disk, allocate the state volume and mark the config as 'suspending'.
PVE::QemuConfig->lock_config($vmid, sub {
- my $conf = PVE::QemuConfig->load_config($vmid);
+ $conf = PVE::QemuConfig->load_config($vmid);
+ my $is_backing_up = PVE::QemuConfig->has_lock($conf, 'backup');
PVE::QemuConfig->check_lock($conf)
- if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup'));
+ if !($skiplock || $is_backing_up);
- vm_mon_cmd($vmid, "stop");
+ # suspend-to-RAM during backup is tolerated, suspend-to-disk is not
+ die "cannot suspend to disk during backup\n"
+ if $is_backing_up && $includestate;
+
+ if ($includestate) {
+ $conf->{lock} = 'suspending';
+ my $date = strftime("%Y-%m-%d", localtime(time()));
+ $storecfg = PVE::Storage::config();
+ $vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, $statestorage, 1);
+ $path = PVE::Storage::path($storecfg, $vmstate);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ vm_mon_cmd($vmid, "stop");
+ }
});
+
+ # Phase 2 (outside the config lock): stream the VM state to $path,
+ # polling QMP until the save completes or fails.
+ if ($includestate) {
+ # save vm state
+ PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+
+ eval {
+ vm_mon_cmd($vmid, "savevm-start", statefile => $path);
+ for(;;) {
+ # NOTE(review): _nocheck presumably because the config now holds
+ # the 'suspending' lock — confirm
+ my $state = vm_mon_cmd_nocheck($vmid, "query-savevm");
+ if (!$state->{status}) {
+ die "savevm not active\n";
+ } elsif ($state->{status} eq 'active') {
+ sleep(1);
+ next;
+ } elsif ($state->{status} eq 'completed') {
+ print "State saved, quitting\n";
+ last;
+ } elsif ($state->{status} eq 'failed' && $state->{error}) {
+ die "query-savevm failed with error '$state->{error}'\n"
+ } else {
+ die "query-savevm returned status '$state->{status}'\n";
+ }
+ }
+ };
+ my $err = $@;
+
+ # Phase 3, under the config lock again: on error free the state volume
+ # and re-raise; on success quit QEMU and flip the lock to 'suspended'.
+ PVE::QemuConfig->lock_config($vmid, sub {
+ $conf = PVE::QemuConfig->load_config($vmid);
+ if ($err) {
+ # cleanup, but leave suspending lock, to indicate something went wrong
+ eval {
+ vm_mon_cmd($vmid, "savevm-end");
+ PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+ PVE::Storage::vdisk_free($storecfg, $vmstate);
+ delete $conf->@{qw(vmstate runningmachine)};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ };
+ warn $@ if $@;
+ die $err;
+ }
+
+ # bail out if some other actor replaced our lock in the meantime
+ die "lock changed unexpectedly\n"
+ if !PVE::QemuConfig->has_lock($conf, 'suspending');
+
+ vm_qmp_command($vmid, { execute => "quit" });
+ $conf->{lock} = 'suspended';
+ PVE::QemuConfig->write_config($vmid, $conf);
+ });
+ }
}
sub vm_resume {
return if $line =~ m/^lock:/;
return if $line =~ m/^unused\d+:/;
return if $line =~ m/^parent:/;
- return if $line =~ m/^template:/; # restored VM is never a template
my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
if (($line =~ m/^(vlan(\d+)):\s*(\S+)\s*$/)) {
my $storage_name = PVE::Storage::parse_volume_id($volid);
- if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}}
+ if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}}
&& !$storecfg->{ids}->{$storage_name}->{krbd}){
return 1;
}
}
sub qemu_drive_mirror {
- my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga) = @_;
+ my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
$jobs = {} if !$jobs;
my $opts = { timeout => 10, device => "drive-$drive", mode => "existing", sync => "full", target => $qemu_target };
$opts->{format} = $format if $format;
- print "drive mirror is starting for drive-$drive\n";
-
- eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); }; #if a job already run for this device,it's throw an error
+ if (defined($bwlimit)) {
+ $opts->{speed} = $bwlimit * 1024;
+ print "drive mirror is starting for drive-$drive with bandwidth limit: ${bwlimit} KB/s\n";
+ } else {
+ print "drive mirror is starting for drive-$drive\n";
+ }
+ # if a job already runs for this device we get an error, catch it for cleanup
+ eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); };
if (my $err = $@) {
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
- die "mirroring error: $err";
+ warn "$@\n" if $@;
+ die "mirroring error: $err\n";
}
qemu_drive_mirror_monitor ($vmid, $vmiddst, $jobs, $skipcomplete, $qga);
sub clone_disk {
my ($storecfg, $vmid, $running, $drivename, $drive, $snapname,
- $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga) = @_;
+ $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
my $newvolid;
my $sparseinit = PVE::Storage::volume_has_feature($storecfg, 'sparseinit', $newvolid);
if (!$running || $snapname) {
+ # TODO: handle bwlimits
qemu_img_convert($drive->{file}, $newvolid, $size, $snapname, $sparseinit);
} else {
if $drive->{iothread};
}
- qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga);
+ qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga, $bwlimit);
}
}