X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FQemuServer.pm;h=9d560ec3a91031d95fe6fd056e3f2c8298514a7c;hb=ca6abacf6b12bb20f23576bcb829f1d934768075;hp=546b22ea1c7ca857441b3366c21ed26b0b3e3217;hpb=92bdc3f0e364704202a3981c5d967eb9504628b1;p=qemu-server.git

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 546b22e..9d560ec 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2,6 +2,7 @@ package PVE::QemuServer;
 
 use strict;
 use warnings;
+use POSIX;
 use IO::Handle;
 use IO::Select;
 use IO::File;
@@ -77,12 +78,6 @@ PVE::JSONSchema::register_standard_option('pve-qm-stateuri', {
     optional => 1,
 });
 
-PVE::JSONSchema::register_standard_option('pve-snapshot-name', {
-    description => "The name of the snapshot.",
-    type => 'string', format => 'pve-configid',
-    maxLength => 40,
-});
-
 PVE::JSONSchema::register_standard_option('pve-qm-image-format', {
     type => 'string',
     enum => [qw(raw cow qcow qed qcow2 vmdk cloop)],
@@ -300,7 +295,7 @@ my $confdesc = {
         optional => 1,
         type => 'string',
         description => "Lock/unlock the VM.",
-        enum => [qw(backup clone create migrate rollback snapshot snapshot-delete)],
+        enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
     },
     cpulimit => {
         optional => 1,
@@ -802,13 +797,9 @@ The DHCP server assign addresses to the guest starting from 10.0.2.15.
 __EOD__
 
 my $net_fmt = {
-    macaddr => {
-        type => 'string',
-        pattern => qr/[0-9a-f]{2}(?::[0-9a-f]{2}){5}/i,
+    macaddr => get_standard_option('mac-addr', {
         description => "MAC address. That address must be unique withing your network. This is automatically generated if not specified.",
-        format_description => "XX:XX:XX:XX:XX:XX",
-        optional => 1,
-    },
+    }),
     model => {
         type => 'string',
         description => "Network Card Model. The 'virtio' model provides the best performance with very low CPU overhead. If your guest does not support this driver, it is usually best to use 'e1000'.",
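
A note on the macaddr hunk above: get_standard_option() takes the name of a registered standard option plus a hash of overrides, so the per-format definition only spells out what differs from the shared 'mac-addr' schema. A minimal sketch of that pattern, with a hypothetical option name and schema that are not part of this patch:

    use PVE::JSONSchema qw(get_standard_option);

    # register once, centrally ('my-mac' and its schema are made up here)
    PVE::JSONSchema::register_standard_option('my-mac', {
        type => 'string',
        pattern => qr/[0-9a-f]{2}(?::[0-9a-f]{2}){5}/i,
        format_description => "XX:XX:XX:XX:XX:XX",
        optional => 1,
    });

    # reuse it with a per-site override; keys not overridden here (type,
    # pattern, optional) still come from the registered definition
    my $macaddr_schema = get_standard_option('my-mac', {
        description => "MAC address of the network device.",
    });
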
@@ -1290,7 +1281,7 @@ my $hostpci_fmt = {
     pattern => qr/$PCIRE(;$PCIRE)*/,
     format_description => 'HOSTPCIID[;HOSTPCIID2...]',
     description => <<EODESCR,
[...]
@@ ... @@
     my $volid = $drive->{file};
     my $format;
-    
+
     if (drive_is_cdrom($drive)) {
         $path = get_iso_path($storecfg, $vmid, $volid);
     } else {
@@ -2855,23 +2846,23 @@ sub config_list {
 sub check_local_resources {
     my ($conf, $noerr) = @_;
 
-    my $loc_res = 0;
+    my @loc_res = ();
 
-    $loc_res = 1 if $conf->{hostusb}; # old syntax
-    $loc_res = 1 if $conf->{hostpci}; # old syntax
+    push @loc_res, "hostusb" if $conf->{hostusb}; # old syntax
+    push @loc_res, "hostpci" if $conf->{hostpci}; # old syntax
 
-    $loc_res = 1 if $conf->{ivshmem};
+    push @loc_res, "ivshmem" if $conf->{ivshmem};
 
     foreach my $k (keys %$conf) {
         next if $k =~ m/^usb/ && ($conf->{$k} eq 'spice');
         # sockets are safe: they will recreated be on the target side post-migrate
         next if $k =~ m/^serial/ && ($conf->{$k} eq 'socket');
-        $loc_res = 1 if $k =~ m/^(usb|hostpci|serial|parallel)\d+$/;
+        push @loc_res, $k if $k =~ m/^(usb|hostpci|serial|parallel)\d+$/;
     }
 
-    die "VM uses local resources\n" if $loc_res && !$noerr;
+    die "VM uses local resources\n" if scalar @loc_res && !$noerr;
 
-    return $loc_res;
+    return \@loc_res;
 }
 
 # check if used storages are available on all nodes (use by migrate)
@@ -3069,6 +3060,11 @@ our $vmstatus_return_properties = {
         type => 'number',
         optional => 1,
     },
+    lock => {
+        description => "The current config lock, if any.",
+        type => 'string',
+        optional => 1,
+    }
 };
 
 my $last_proc_pid_stat;
@@ -3139,6 +3135,7 @@ sub vmstatus {
         $d->{template} = PVE::QemuConfig->is_template($conf);
 
         $d->{serial} = 1 if conf_has_serial($conf);
+        $d->{lock} = $conf->{lock} if $conf->{lock};
 
         $res->{$vmid} = $d;
     }
@@ -3801,7 +3798,7 @@ sub config_to_command {
     push @$cmd, get_cpu_options($conf, $arch, $kvm, $machine_type, $kvm_off, $kvmver, $winversion, $gpu_passthrough);
 
     PVE::QemuServer::Memory::config($conf, $vmid, $sockets, $cores, $defaults, $hotplug_features, $cmd);
-    
+
     push @$cmd, '-S' if $conf->{freeze};
 
     push @$cmd, '-k', $conf->{keyboard} if defined($conf->{keyboard});
@@ -3925,7 +3922,7 @@
         my $queues = '';
         if($conf->{scsihw} && $conf->{scsihw} eq "virtio-scsi-single" && $drive->{queues}){
             $queues = ",num_queues=$drive->{queues}";
-        } 
+        }
 
         push @$devices, '-device', "$scsihw_type,id=$controller_prefix$controller$pciaddr$iothread$queues" if !$scsicontroller->{$controller};
         $scsicontroller->{$controller}=1;
@@ -4002,6 +3999,12 @@
     push @$cmd, '-global', join(',', @$globalFlags)
         if scalar(@$globalFlags);
 
+    if (my $vmstate = $conf->{vmstate}) {
+        my $statepath = PVE::Storage::path($storecfg, $vmstate);
+        PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+        push @$cmd, '-loadstate', $statepath;
+    }
+
     # add custom args
     if ($conf->{args}) {
         my $aa = PVE::Tools::split_args($conf->{args});
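
Since check_local_resources() now returns an array reference instead of a boolean, callers that pass $noerr can tell the user which config keys block a migration. A small caller-side sketch (the surrounding code and message text are illustrative, not from this patch):

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1); # noerr: collect only
    if (scalar(@$loc_res)) {
        die "can't migrate VM - uses local resource(s): " . join(', ', @$loc_res) . "\n";
    }
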
@@ -4278,7 +4281,11 @@ sub qemu_iothread_add {
 sub qemu_iothread_del {
     my($conf, $vmid, $deviceid) = @_;
 
-    my $device = parse_drive($deviceid, $conf->{$deviceid});
+    my $confid = $deviceid;
+    if ($deviceid =~ m/^(?:virtioscsi|scsihw)(\d+)$/) {
+        $confid = 'scsi' . $1;
+    }
+    my $device = parse_drive($confid, $conf->{$confid});
     if ($device->{iothread}) {
         my $iothreads = vm_iothreads_list($vmid);
         qemu_objectdel($vmid, "iothread-$deviceid") if $iothreads->{"iothread-$deviceid"};
@@ -5148,7 +5155,10 @@ sub vm_start {
     die "you can't start a vm if it's a template\n" if PVE::QemuConfig->is_template($conf);
 
-    PVE::QemuConfig->check_lock($conf) if !$skiplock;
+    my $is_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+
+    PVE::QemuConfig->check_lock($conf)
+        if !($skiplock || $is_suspended);
 
     die "VM $vmid already running\n" if check_running($vmid, undef, $migratedfrom);
@@ -5214,6 +5224,12 @@
     PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'pre-start', 1);
 
+    if ($is_suspended) {
+        # enforce machine type on suspended vm to ensure HW compatibility
+        $forcemachine = $conf->{runningmachine};
+        print "Resuming suspended VM\n";
+    }
+
     my ($cmd, $vollist, $spice_port) = config_to_command($storecfg, $vmid, $conf, $defaults, $forcemachine);
 
     my $migrate_port = 0;
@@ -5304,7 +5320,7 @@
         my $cpuunits = defined($conf->{cpuunits}) ? $conf->{cpuunits} : $defaults->{cpuunits};
 
-        my $start_timeout = $conf->{hugepages} ? 300 : 30;
+        my $start_timeout = ($conf->{hugepages} || $is_suspended) ? 300 : 30;
         my %run_params = (timeout => $statefile ? undef : $start_timeout, umask => 0077);
 
         my %properties = (
@@ -5411,6 +5427,14 @@
             property => "guest-stats-polling-interval",
             value => 2) if (!defined($conf->{balloon}) || $conf->{balloon});
 
+        if ($is_suspended && (my $vmstate = $conf->{vmstate})) {
+            print "Resumed VM, removing state\n";
+            delete $conf->@{qw(lock vmstate runningmachine)};
+            PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+            PVE::Storage::vdisk_free($storecfg, $vmstate);
+            PVE::QemuConfig->write_config($vmid, $conf);
+        }
+
         PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'post-start');
     });
 }
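
The vm_start() hunks above tie together the keys written at suspend-to-disk time: the 'suspended' lock, 'runningmachine' and 'vmstate'. A condensed sketch of the resume path, with made-up example values in the comments:

    # config of a VM suspended to disk (example values, hypothetical):
    #   lock: suspended
    #   runningmachine: pc-i440fx-2.12
    #   vmstate: local:100/vm-100-state-suspend-2019-01-29.raw
    if (PVE::QemuConfig->has_lock($conf, 'suspended')) {
        # pin the machine type the guest was running on, for HW compatibility
        $forcemachine = $conf->{runningmachine};
        # config_to_command() then activates the vmstate volume and appends
        # '-loadstate' with PVE::Storage::path($storecfg, $conf->{vmstate});
        # once the VM is up, lock/vmstate/runningmachine are removed and the
        # state volume is freed again.
    }
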
"query-savevm returned status '$state->{status}'\n"; + } + } + }; + my $err = $@; + + PVE::QemuConfig->lock_config($vmid, sub { + $conf = PVE::QemuConfig->load_config($vmid); + if ($err) { + # cleanup, but leave suspending lock, to indicate something went wrong + eval { + vm_mon_cmd($vmid, "savevm-end"); + PVE::Storage::deactivate_volumes($storecfg, [$vmstate]); + PVE::Storage::vdisk_free($storecfg, $vmstate); + delete $conf->@{qw(vmstate runningmachine)}; + PVE::QemuConfig->write_config($vmid, $conf); + }; + warn $@ if $@; + die $err; + } + + die "lock changed unexpectedly\n" + if !PVE::QemuConfig->has_lock($conf, 'suspending'); + + vm_qmp_command($vmid, { execute => "quit" }); + $conf->{lock} = 'suspended'; + PVE::QemuConfig->write_config($vmid, $conf); + }); + } } sub vm_resume { @@ -5813,7 +5904,6 @@ sub restore_update_config_line { return if $line =~ m/^lock:/; return if $line =~ m/^unused\d+:/; return if $line =~ m/^parent:/; - return if $line =~ m/^template:/; # restored VM is never a template my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); if (($line =~ m/^(vlan(\d+)):\s*(\S+)\s*$/)) { @@ -6496,7 +6586,7 @@ sub do_snapshots_with_qemu { my $storage_name = PVE::Storage::parse_volume_id($volid); - if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}} + if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}} && !$storecfg->{ids}->{$storage_name}->{krbd}){ return 1; } @@ -6637,7 +6727,7 @@ sub qemu_img_format { } sub qemu_drive_mirror { - my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga) = @_; + my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga, $bwlimit) = @_; $jobs = {} if !$jobs; @@ -6664,13 +6754,19 @@ sub qemu_drive_mirror { my $opts = { timeout => 10, device => "drive-$drive", mode => "existing", sync => "full", target => $qemu_target }; $opts->{format} = $format if $format; - print "drive mirror is starting for drive-$drive\n"; - - eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); }; #if a job already run for this device,it's throw an error + if (defined($bwlimit)) { + $opts->{speed} = $bwlimit * 1024; + print "drive mirror is starting for drive-$drive with bandwidth limit: ${bwlimit} KB/s\n"; + } else { + print "drive mirror is starting for drive-$drive\n"; + } + # if a job already runs for this device we get an error, catch it for cleanup + eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); }; if (my $err = $@) { eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) }; - die "mirroring error: $err"; + warn "$@\n" if $@; + die "mirroring error: $err\n"; } qemu_drive_mirror_monitor ($vmid, $vmiddst, $jobs, $skipcomplete, $qga); @@ -6808,7 +6904,7 @@ sub qemu_blockjobs_cancel { sub clone_disk { my ($storecfg, $vmid, $running, $drivename, $drive, $snapname, - $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga) = @_; + $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga, $bwlimit) = @_; my $newvolid; @@ -6843,6 +6939,7 @@ sub clone_disk { my $sparseinit = PVE::Storage::volume_has_feature($storecfg, 'sparseinit', $newvolid); if (!$running || $snapname) { + # TODO: handle bwlimits qemu_img_convert($drive->{file}, $newvolid, $size, $snapname, $sparseinit); } else { @@ -6852,7 +6949,7 @@ sub clone_disk { if $drive->{iothread}; } - qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga); + qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, 
@@ -6808,7 +6904,7 @@ sub qemu_blockjobs_cancel {
 
 sub clone_disk {
     my ($storecfg, $vmid, $running, $drivename, $drive, $snapname,
-        $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga) = @_;
+        $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
 
     my $newvolid;
@@ -6843,6 +6939,7 @@ sub clone_disk {
         my $sparseinit = PVE::Storage::volume_has_feature($storecfg, 'sparseinit', $newvolid);
 
         if (!$running || $snapname) {
+            # TODO: handle bwlimits
             qemu_img_convert($drive->{file}, $newvolid, $size, $snapname, $sparseinit);
         } else {
 
@@ -6852,7 +6949,7 @@
                 if $drive->{iothread};
             }
 
-            qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga);
+            qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs, $skipcomplete, $qga, $bwlimit);
         }
 }
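
For completeness, a caller-side sketch of the extended clone_disk() signature; the new trailing $bwlimit is optional and is only forwarded to qemu_drive_mirror() in the live-clone branch (variable names are illustrative, and the return value is assumed to be the cloned drive description):

    my $clonedrive = clone_disk(
        $storecfg, $vmid, $running, $drivename, $drive, $snapname,
        $newvmid, $storage, $format, $full, $newvollist,
        $jobs, $skipcomplete, $qga,
        $bwlimit, # KiB/s; undef leaves the mirror speed unlimited
    );
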