X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FQemuMigrate.pm;h=27c5b7af7b42bd0979aec9dc5797d7b508c1070d;hb=71c58bb7edd5916e61092acf67d57e032325ed9a;hp=27cf7e36eb3706d3cc73cadad0f0ea436a7fa4dc;hpb=50d8dd5dc731346ffeca93b4e2d0f2aef2c05fa2;p=qemu-server.git

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 27cf7e3..27c5b7a 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -11,6 +11,8 @@ use PVE::Tools;
 use PVE::Cluster;
 use PVE::Storage;
 use PVE::QemuServer;
+use PVE::QemuServer::Machine;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use Time::HiRes qw( usleep );
 use PVE::RPCEnvironment;
 use PVE::ReplicationConfig;
@@ -215,13 +217,13 @@ sub prepare {
         die "can't migrate running VM without --online\n" if !$online;
         $running = $pid;
 
-        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
+        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
 
     }
-
-    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
+    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
+    if (scalar @$loc_res) {
         if ($self->{running} || !$self->{opts}->{force}) {
-            die "can't migrate VM which uses local devices\n";
+            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
         } else {
             $self->log('info', "migrating VM which uses local devices");
         }
@@ -234,7 +236,7 @@ sub prepare {
         my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
 
         # check if storage is available on both nodes
-        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+        my $targetsid = $self->{opts}->{targetstorage} // $sid;
 
         my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
         PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -246,6 +248,7 @@ sub prepare {
                 if !$plugin->check_connection($sid, $scfg);
         } else {
             # only activate if not shared
+            next if ($volid =~ m/vm-\d+-cloudinit/);
             push @$need_activate, $volid;
         }
     }
@@ -269,6 +272,8 @@ sub sync_disks {
     # local volumes which have been copied
     $self->{volumes} = [];
 
+    my $override_targetsid = $self->{opts}->{targetstorage};
+
     eval {
 
         # found local volumes and their origin
@@ -301,7 +306,7 @@ sub sync_disks {
 
             next if @{$dl->{$storeid}} == 0;
 
-            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;
+            my $targetsid = $override_targetsid // $storeid;
 
             # check if storage is available on target node
             PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -340,7 +345,7 @@ sub sync_disks {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
 
-           my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+           my $targetsid = $override_targetsid // $sid;
 
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -351,7 +356,13 @@ sub sync_disks {
            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
 
-           die "local cdrom image\n" if $attr->{cdrom};
+           if ($attr->{cdrom}) {
+               if ($volid =~ /vm-\d+-cloudinit/) {
+                   $local_volumes->{$volid}->{ref} = 'generated';
+                   return;
+               }
+               die "local cdrom image\n";
+           }
 
            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
@@ -392,6 +403,8 @@ sub sync_disks {
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
+           } elsif ($ref eq 'generated') {
+               $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
@@ -404,10 +417,6 @@ sub sync_disks {
            $self->log('warn', "$err");
        }
 
-       if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
-           $self->{opts}->{targetstorage} = 1; #use same sid for remote local
-       }
-
        if ($abort) {
            die "can't migrate VM - check log\n";
        }
@@ -417,8 +426,7 @@ sub sync_disks {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
 
-           my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
-               ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');
+           my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;
 
            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;
@@ -429,32 +437,42 @@ sub sync_disks {
            }
        }
 
-       my $rep_volumes;
-
-       $self->log('info', "copying disk images");
-
        my $rep_cfg = PVE::ReplicationConfig->new();
        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            die "can't live migrate VM with replicated volumes\n" if $self->{running};
+           $self->log('info', "replicating disk images");
            my $start_time = time();
-           my $logfunc = sub { my ($msg) = @_;  $self->log('info', $msg); };
-           $rep_volumes = PVE::Replication::run_replication(
+           my $logfunc = sub { $self->log('info', shift) };
+           $self->{replicated_volumes} = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
-           $self->{replicated_volumes} = $rep_volumes;
        }
 
+       $self->log('info', "copying local disk images") if scalar(%$local_volumes);
+
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
-           if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid}->{ref} eq 'config') {
+           my $targetsid = $override_targetsid // $sid;
+           my $ref = $local_volumes->{$volid}->{ref};
+           if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
+           } elsif ($ref eq 'generated') {
+               die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
+               # skip all generated volumes but queue them for deletion in phase3_cleanup
+               push @{$self->{volumes}}, $volid;
+               next;
            } else {
-               next if $rep_volumes->{$volid};
+               next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
-               my $insecure = $self->{opts}->{migration_type} eq 'insecure';
+               my $opts = $self->{opts};
+               my $insecure = $opts->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
-               PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
-                                             undef, undef, undef, undef, $insecure, $with_snapshots);
+               # use 'migrate' limit for transfer to other node
+               my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$targetsid, $sid], $opts->{bwlimit});
+               # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
+               $bwlimit = $bwlimit * 1024 if defined($bwlimit);
+
+               PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
+                                             undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
            }
        }
    };
@@ -531,7 +549,7 @@ sub phase2 {
 
     my $spice_ticket;
     if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
-        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
+        my $res = mon_cmd($vmid, 'query-spice');
         $spice_ticket = $res->{ticket};
     }
@@ -554,8 +572,8 @@ sub phase2 {
         push @$cmd, '--machine', $self->{forcemachine};
     }
 
-    if ($self->{opts}->{targetstorage}) {
-        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+    if ($self->{online_local_volumes}) {
+        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
     }
 
     my $spice_port;
@@ -580,10 +598,10 @@ sub phase2 {
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
-       elsif ($line =~ m/^spice listens on port (\d+)$/) { 
+       elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
-       elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) { 
+       elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
@@ -642,7 +660,9 @@ sub phase2 {
 
     my $start = time();
 
-    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+    my $opt_bwlimit = $self->{opts}->{bwlimit};
+
+    if (defined($self->{online_local_volumes})) {
         $self->{storage_migration} = 1;
         $self->{storage_migration_jobs} = {};
         $self->log('info', "starting storage migration");
@@ -650,9 +670,14 @@ sub phase2 {
         die "The number of local disks does not match between the source and the destination.\n"
             if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
         foreach my $drive (keys %{$self->{target_drive}}){
-            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
+            my $target = $self->{target_drive}->{$drive};
+            my $nbd_uri = $target->{nbd_uri};
+            my $source_sid = PVE::Storage::Plugin::parse_volume_id($conf->{$drive});
+            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target->{volid});
+            my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$source_sid, $target_sid], $opt_bwlimit);
+
             $self->log('info', "$drive: start migration to $nbd_uri");
-            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
+            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
         }
     }
 
@@ -662,14 +687,24 @@ sub phase2 {
     # load_defaults
     my $defaults = PVE::QemuServer::load_defaults();
 
-    # always set migrate speed (overwrite kvm default of 32m)
-    # we set a very hight default of 8192m which is basically unlimited
-    my $migrate_speed = $defaults->{migrate_speed} || 8192;
-    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
-    $migrate_speed = $migrate_speed * 1048576;
+    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
+    # migrate_speed parameter in qm.conf - take the lower of the two.
+    my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', undef, $opt_bwlimit) // 0;
+    my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
+    # migrate_speed is in MB/s, bwlimit in KB/s
+    $migrate_speed *= 1024;
+
+    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
+
+    # always set migrate speed (overwrite kvm default of 32m) we set a very high
+    # default of 8192m which is basically unlimited
+    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;
+
+    # qmp takes migrate_speed in B/s.
+    $migrate_speed *= 1024;
 
     $self->log('info', "migrate_set_speed: $migrate_speed");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
+        mon_cmd($vmid, "migrate_set_speed", value => int($migrate_speed));
     };
     $self->log('info', "migrate_set_speed error: $@") if $@;
 
@@ -678,7 +713,7 @@ sub phase2 {
     if (defined($migrate_downtime)) {
         $self->log('info', "migrate_set_downtime: $migrate_downtime");
         eval {
-            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
+            mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
         };
         $self->log('info', "migrate_set_downtime error: $@") if $@;
     }
@@ -696,7 +731,7 @@ sub phase2 {
 
     $self->log('info', "set cachesize: $cachesize");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
+        mon_cmd($vmid, "migrate-set-cache-size", value => int($cachesize));
     };
     $self->log('info', "migrate-set-cache-size error: $@") if $@;
 
@@ -707,13 +742,13 @@ sub phase2 {
         my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
 
         my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
-        my $subject = PVE::AccessControl::read_x509_subject_spice($filename); 
+        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
 
         $self->log('info', "spice client_migrate_info");
 
         eval {
-            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
-                                                hostname => $proxyticket, 'tls-port' => $spice_port,
+            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
+                    hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                     'cert-subject' => $subject);
         };
         $self->log('info', "client_migrate_info error: $@") if $@;
@@ -722,7 +757,7 @@ sub phase2 {
 
     $self->log('info', "start migrate command to $ruri");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
+        mon_cmd($vmid, "migrate", uri => $ruri);
     };
     my $merr = $@;
     $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
@@ -740,7 +775,7 @@ sub phase2 {
         usleep($usleep);
         my $stat;
         eval {
-            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
+            $stat = mon_cmd($vmid, "query-migrate");
         };
         if (my $err = $@) {
             $err_count++;
@@ -809,7 +844,7 @@ sub phase2 {
                 $migrate_downtime *= 2;
                 $self->log('info', "migrate_set_downtime: $migrate_downtime");
                 eval {
-                    PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
+                    mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                 };
                 $self->log('info', "migrate_set_downtime error: $@") if $@;
             }
@@ -818,7 +853,7 @@ sub phase2 {
 
            $lstat = $stat->{ram}->{transferred};
-           
+
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
@@ -836,7 +871,7 @@ sub phase2_cleanup {
 
     $self->log('info', "migrate_cancel");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
+        mon_cmd($vmid, "migrate_cancel");
     };
     $self->log('info', "migrate_cancel error: $@") if $@;
 
@@ -862,7 +897,7 @@ sub phase2_cleanup {
     }
 
     my $nodename = PVE::INotify::nodename();
-    
+
     my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
     eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
     if (my $err = $@) {
@@ -911,11 +946,11 @@ sub phase3_cleanup {
        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
-           die "Failed to completed storage migration\n";
+           die "Failed to complete storage migration: $err\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
-               $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
+               $conf->{$target_drive} = PVE::QemuServer::print_drive($drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
@@ -966,6 +1001,11 @@ sub phase3_cleanup {
            $self->{errors} = 1;
        }
    }
+
+   if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
+       my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
+       eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+   }
 
    # close tunnel on successful migration, on error phase2_cleanup closed it
@@ -982,12 +1022,12 @@ sub phase3_cleanup {
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
        $self->log('info', "Waiting for spice server migration");
        while (1) {
            my $timer = 0;
-           my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
+           my $res = mon_cmd($vmid, 'query-spice');
            last if int($res->{'migrated'}) == 1;
            last if $timer > 50;
            $timer ++;
            usleep(200000);
-       } 
+       }
    }
 };
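
Notes on the hunks above follow. The Perl snippets are standalone sketches of
the changed logic, written for illustration only; sample names, volids, and
numbers in them are invented rather than taken from the tree.

prepare() now expects check_local_resources() to return an array reference of
offending config keys (possibly empty) instead of a truthy scalar, so the
abort message can name the local devices that block the migration:

    use strict;
    use warnings;

    # stand-ins for real config keys such as hostpci0 or serial1
    my $loc_res = ['hostpci0', 'serial1'];

    if (scalar @$loc_res) {
        die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
    }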
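
The sync_disks() hunks add a third volume class next to 'config' and
'snapshot': cloudinit images (volids matching vm-<vmid>-cloudinit) become
'generated'. They are never copied - they are skipped during activation and
sync and only queued for deletion in phase3_cleanup, since the target node
regenerates them. A sketch of that classification, slightly flattened for
standalone use:

    use strict;
    use warnings;

    # Cloudinit images are 'generated' even though they appear as cdrom
    # drives; any other local cdrom image still aborts the migration.
    sub classify_volume {
        my ($volid, $is_cdrom) = @_;
        return 'generated' if $volid =~ /vm-\d+-cloudinit/;
        die "local cdrom image\n" if $is_cdrom;
        return 'config';
    }

    # illustrative volids only
    print classify_volume('local:103/vm-103-cloudinit.qcow2', 1), "\n"; # generated
    print classify_volume('local:103/vm-103-disk-0.qcow2', 0), "\n";    # config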
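
The offline-copy path now applies the 'migrate' bandwidth limit for the
source/target storage pair. As the in-line comment notes, the JSONSchema and
get_bandwidth_limit() work in KB/s while storage_migrate() expects bytes per
second, so the limit is scaled only when one is actually set (the helper name
below is mine):

    use strict;
    use warnings;

    # get_bandwidth_limit() returns KB/s, or undef for "no limit";
    # storage_migrate() wants bytes/s, and undef must stay undef.
    sub kbps_to_bps {
        my ($kbps) = @_;
        return defined($kbps) ? $kbps * 1024 : undef;
    }

    printf "%s\n", kbps_to_bps(10240) // 'unlimited'; # 10485760 (10 MB/s)
    printf "%s\n", kbps_to_bps(undef) // 'unlimited'; # unlimited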
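
The rewritten migrate-speed block in phase2 juggles three units: migrate_speed
from qm.conf is MB/s, bwlimit from datacenter.cfg or the API is KB/s, and
QMP's migrate_set_speed takes bytes/s. The lower of the two configured limits
wins, with the old, effectively unlimited 8192 MB/s default as fallback. A
simplified sketch of that selection, treating undef/0 as "no limit" (function
name and sample values are mine):

    use strict;
    use warnings;
    use List::Util qw(min);

    # Returns the value to hand to QMP's migrate_set_speed, in bytes/s.
    # $conf_mbps is MB/s (qm.conf migrate_speed), $bwlimit_kbps is KB/s
    # (datacenter.cfg or API bwlimit).
    sub migrate_speed_bps {
        my ($conf_mbps, $bwlimit_kbps) = @_;

        my @limits_kbps;
        push @limits_kbps, $conf_mbps * 1024 if $conf_mbps; # MB/s -> KB/s
        push @limits_kbps, $bwlimit_kbps if $bwlimit_kbps;

        # fall back to the old "basically unlimited" default of 8192 MB/s
        my $speed_kbps = @limits_kbps ? min(@limits_kbps) : 8192 * 1024;

        return $speed_kbps * 1024;                          # KB/s -> bytes/s
    }

    # 100 MB/s in qm.conf vs a 51200 KB/s (50 MB/s) bwlimit -> 52428800 B/s
    print migrate_speed_bps(100, 51200), "\n";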