X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FQemuMigrate.pm;h=5ed953add834e76d3680028cf48bc4a803156592;hb=49a5a0d84b626bef92ed4f1f519d5b1b9d59a64e;hp=1753a1076bcfb82499578c5435f3c0cd7880e762;hpb=29eb909ee0105e57e54026a68cbdbd02989615cb;p=qemu-server.git

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 1753a10..5ed953a 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -2,23 +2,30 @@ package PVE::QemuMigrate;
 
 use strict;
 use warnings;
-use PVE::AbstractMigrate;
+
 use IO::File;
 use IPC::Open2;
 use POSIX qw( WNOHANG );
-use PVE::INotify;
-use PVE::Tools;
-use PVE::Cluster;
-use PVE::Storage;
-use PVE::QemuServer;
-use PVE::QemuServer::Machine;
-use PVE::QemuServer::Monitor qw(mon_cmd);
 use Time::HiRes qw( usleep );
+
+use PVE::Cluster;
+use PVE::INotify;
 use PVE::RPCEnvironment;
+use PVE::Replication;
 use PVE::ReplicationConfig;
 use PVE::ReplicationState;
-use PVE::Replication;
+use PVE::Storage;
+use PVE::Tools;
+
+use PVE::QemuConfig;
+use PVE::QemuServer::CPUConfig;
+use PVE::QemuServer::Drive;
+use PVE::QemuServer::Helpers qw(min_version);
+use PVE::QemuServer::Machine;
+use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer;
+use PVE::AbstractMigrate;
 
 use base qw(PVE::AbstractMigrate);
 
 sub fork_command_pipe {
@@ -141,7 +148,10 @@ sub write_tunnel {
 sub fork_tunnel {
     my ($self, $tunnel_addr) = @_;
 
-    my @localtunnelinfo = defined($tunnel_addr) ? ('-L' , $tunnel_addr ) : ();
+    my @localtunnelinfo = ();
+    foreach my $addr (@$tunnel_addr) {
+        push @localtunnelinfo, '-L', $addr;
+    }
 
     my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel' ];
 
@@ -183,7 +193,7 @@ sub finish_tunnel {
 
     if ($tunnel->{sock_addr}) {
         # ssh does not clean up on local host
-        my $cmd = ['rm', '-f', $tunnel->{sock_addr}]; #
+        my $cmd = ['rm', '-f', @{$tunnel->{sock_addr}}]; #
         PVE::Tools::run_command($cmd);
 
         # .. and just to be sure check on remote side
@@ -219,7 +229,17 @@ sub prepare {
 
         $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
 
+        # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
+        # Since the parameter itself contains no reference to a custom model,
+        # this makes migration independent of changes to "cpu-models.conf".
+        if ($conf->{cpu}) {
+            my $cpuconf = PVE::QemuServer::CPUConfig::parse_cpu_conf_basic($conf->{cpu});
+            if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
+                $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
+            }
+        }
     }
+
     my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
     if (scalar @$loc_res) {
         if ($self->{running} || !$self->{opts}->{force}) {
@@ -236,7 +256,7 @@ sub prepare {
         my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
 
         # check if storage is available on both nodes
-        my $targetsid = $self->{opts}->{targetstorage} // $sid;
+        my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
 
         my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
         PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -270,10 +290,11 @@ sub sync_disks {
     my $conf = $self->{vmconf};
 
     # local volumes which have been copied
+    # and their old_id => new_id pairs
     $self->{volumes} = [];
+    $self->{volume_map} = {};
 
-    my $override_targetsid = $self->{opts}->{targetstorage};
-
+    my $storecfg = $self->{storecfg};
     eval {
 
         # found local volumes and their origin
@@ -293,29 +314,46 @@ sub sync_disks {
            $abort = 1;
        };
 
-       my @sids = PVE::Storage::storage_ids($self->{storecfg});
+       my @sids = PVE::Storage::storage_ids($storecfg);
        foreach my $storeid (@sids) {
-           my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
+           my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
            next if $scfg->{shared};
-           next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);
+           next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);
 
            # get list from PVE::Storage (for unused volumes)
-           my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
+           my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid);
 
            next if @{$dl->{$storeid}} == 0;
 
-           my $targetsid = $override_targetsid // $storeid;
-
+           my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $storeid);
            # check if storage is available on target node
-           PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
+           PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});
+
+           # grandfather in existing mismatches
+           if ($targetsid ne $storeid) {
+               my $target_scfg = PVE::Storage::storage_config($storecfg, $targetsid);
+               die "content type 'images' is not available on storage '$targetsid'\n"
+                   if !$target_scfg->{content}->{images};
+           }
 
            PVE::Storage::foreach_volid($dl, sub {
-               my ($volid, $sid, $volname) = @_;
+               my ($volid, $sid, $volinfo) = @_;
 
                $local_volumes->{$volid}->{ref} = 'storage';
+
+               # If with_snapshots is not set for storage migrate, it tries to use
+               # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
+               # back to qcow2 is currently not possible.
+               $local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
+               $local_volumes->{$volid}->{format} = $volinfo->{format};
            });
        }
 
+       my $rep_cfg = PVE::ReplicationConfig->new();
+       my $replication_jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node});
+       my $replicatable_volumes = !$replication_jobcfg ? {}
+           : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
+
        my $test_volid = sub {
            my ($volid, $attr) = @_;
 
@@ -342,15 +380,17 @@ sub sync_disks {
 
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
 
-           my $targetsid = $override_targetsid // $sid;
+           my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
            # check if storage is available on both nodes
-           my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
-           PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
+           my $scfg = PVE::Storage::storage_check_node($storecfg, $sid);
+           PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});
 
            return if $scfg->{shared};
 
            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
+           $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;
+
            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
@@ -359,25 +399,25 @@ sub sync_disks {
                die "local cdrom image\n";
            }
 
-           my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
+           my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
            die "owned by other VM (owner = VM $owner)\n"
-               if !$owner || ($owner != $self->{vmid});
+               if !$owner || ($owner != $vmid);
 
-           my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
-           $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
            if (defined($snaprefs)) {
+               $local_volumes->{$volid}->{snapshots} = 1;
+
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)
 
                die "online storage migration not possible if snapshot exists\n" if $self->{running};
-               if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
+               if (!($scfg->{type} eq 'zfspool' || $local_volumes->{$volid}->{format} eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }
 
            die "referenced by linked clone(s)\n"
-               if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
+               if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
        };
 
        PVE::QemuServer::foreach_volid($conf, sub {
@@ -389,19 +429,20 @@ sub sync_disks {
        });
 
        foreach my $vol (sort keys %$local_volumes) {
+           my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
-               $self->log('info', "found local disk '$vol' (via storage)\n");
+               $self->log('info', "found $type disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
-               $self->log('info', "found local disk '$vol' (in current VM config)\n");
+               $self->log('info', "found $type disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
-               $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
+               $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
-               $self->log('info', "found local disk '$vol'\n");
+               $self->log('info', "found $type disk '$vol'\n");
            }
        }
 
@@ -419,7 +460,7 @@ sub sync_disks {
        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
-           my $scfg =  PVE::Storage::storage_config($self->{storecfg}, $sid);
+           my $scfg =  PVE::Storage::storage_config($storecfg, $sid);
 
            my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;
 
@@ -427,38 +468,71 @@ sub sync_disks {
                if !$migratable;
 
            # image is a linked clone on local storage, so we can't migrate.
-           if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
+           if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }
 
-       my $rep_cfg = PVE::ReplicationConfig->new();
-       if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
-           die "can't live migrate VM with replicated volumes\n" if $self->{running};
+       if ($replication_jobcfg) {
+           if ($self->{running}) {
+
+               my $version = PVE::QemuServer::kvm_user_version();
+               if (!min_version($version, 4, 2)) {
+                   die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n"
+               }
+
+               my $live_replicatable_volumes = {};
+               PVE::QemuConfig->foreach_volume($conf, sub {
+                   my ($ds, $drive) = @_;
+
+                   my $volid = $drive->{file};
+                   $live_replicatable_volumes->{$ds} = $volid
+                       if defined($replicatable_volumes->{$volid});
+               });
+               foreach my $drive (keys %$live_replicatable_volumes) {
+                   my $volid = $live_replicatable_volumes->{$drive};
+
+                   my $bitmap = "repl_$drive";
+
+                   # start tracking before replication to get full delta + a few duplicates
+                   $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
+                   mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);
+
+                   # other info comes from target node in phase 2
+                   $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
+               }
+           }
            $self->log('info', "replicating disk images");
+
            my $start_time = time();
            my $logfunc = sub { $self->log('info', shift) };
            $self->{replicated_volumes} = PVE::Replication::run_replication(
-               'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
+               'PVE::QemuConfig', $replication_jobcfg, $start_time, $start_time, $logfunc);
        }
 
        # sizes in config have to be accurate for remote node to correctly
        # allocate disks, rescan to be sure
-       my $volid_hash = PVE::QemuServer::scan_volids($self->{storecfg}, $vmid);
-       PVE::QemuServer::foreach_drive($conf, sub {
+       my $volid_hash = PVE::QemuServer::scan_volids($storecfg, $vmid);
+       PVE::QemuConfig->foreach_volume($conf, sub {
           my ($key, $drive) = @_;
 
-           my ($updated, $old_size, $new_size) = PVE::QemuServer::update_disksize($drive, $volid_hash);
+           my ($updated, $old_size, $new_size) = PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
            if (defined($updated)) {
                $conf->{$key} = PVE::QemuServer::print_drive($updated);
                $self->log('info', "size of disk '$updated->{file}' ($key) updated from $old_size to $new_size\n");
            }
        });
 
+       # we want to set the efidisk size in the config to the size of the
+       # real OVMF_VARS.fd image, otherwise we might create an image that is too big, which does not work
+       if (defined($conf->{efidisk0})) {
+           PVE::QemuServer::update_efidisk_size($conf);
+       }
+
        $self->log('info', "copying local disk images") if scalar(%$local_volumes);
 
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
-           my $targetsid = $override_targetsid // $sid;
+           my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
@@ -471,15 +545,28 @@ sub sync_disks {
                next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
                my $opts = $self->{opts};
-               my $insecure = $opts->{migration_type} eq 'insecure';
-               my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                # use 'migrate' limit for transfer to other node
                my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$targetsid, $sid], $opts->{bwlimit});
                # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
                $bwlimit = $bwlimit * 1024 if defined($bwlimit);
 
-               PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
-                                             undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
+               my $storage_migrate_opts = {
+                   'bwlimit' => $bwlimit,
+                   'insecure' => $opts->{migration_type} eq 'insecure',
+                   'with_snapshots' => $local_volumes->{$volid}->{snapshots},
+                   'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
+               };
+
+               my $new_volid = eval {
+                   PVE::Storage::storage_migrate($storecfg, $volid, $self->{ssh_info},
+                                                 $targetsid, $storage_migrate_opts);
+               };
+               if (my $err = $@) {
+                   die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
+               }
+
+               $self->{volume_map}->{$volid} = $new_volid;
+               $self->log('info', "volume '$volid' is '$new_volid' on the target\n");
            }
        }
    };
 
@@ -490,8 +577,14 @@ sub cleanup_remotedisks {
     my ($self) = @_;
 
     foreach my $target_drive (keys %{$self->{target_drive}}) {
+       my $drivestr = $self->{target_drive}->{$target_drive}->{drivestr};
+       next if !defined($drivestr);
+
+       my $drive = PVE::QemuServer::parse_drive($target_drive, $drivestr);
+
+       # don't clean up replicated disks!
+       next if defined($self->{replicated_volumes}->{$drive->{file}});
 
-       my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
 
        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
@@ -504,6 +597,16 @@ sub cleanup_remotedisks {
     }
 }
 
+sub cleanup_bitmaps {
+    my ($self) = @_;
+    foreach my $drive (keys %{$self->{target_drive}}) {
+       my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
+       next if !$bitmap;
+       $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
+       mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
+    }
+}
+
 sub phase1 {
     my ($self, $vmid) = @_;
 
@@ -540,6 +643,12 @@ sub phase1_cleanup {
            # fixme: try to remove ?
        }
     }
+
+    eval { $self->cleanup_bitmaps() };
+    if (my $err = $@) {
+       $self->log('err', $err);
+    }
+
 }
 
 sub phase2 {
@@ -582,15 +691,31 @@ sub phase2 {
        push @$cmd, '--machine', $self->{forcemachine};
     }
 
+    if ($self->{forcecpu}) {
+       push @$cmd, '--force-cpu', $self->{forcecpu};
+    }
+
     if ($self->{online_local_volumes}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
     }
 
     my $spice_port;
+    my $tunnel_addr = [];
+    my $sock_addr = [];
+    # version > 0 for unix socket support
+    my $nbd_protocol_version = 1;
+    # TODO change to 'spice_ticket: <ticket>\n' in 7.0
+    my $input = $spice_ticket ? "$spice_ticket\n" : "\n";
+    $input .= "nbd_protocol_version: $nbd_protocol_version\n";
+    foreach my $volid (keys %{$self->{replicated_volumes}}) {
+       $input .= "replicated_volume: $volid\n";
+    }
+
+    my $target_replicated_volumes = {};
 
     # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
     # instead we pipe it through STDIN
-    my $exitcode = PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
+    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;
 
        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
@@ -619,7 +744,22 @@ sub phase2 {
            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
 
+       } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
+           my $drivestr = $4;
+           die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
+           my $nbd_unix_addr = $1;
+           my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
+           my $targetdrive = $3;
+           $targetdrive =~ s/drive-//g;
+
+           $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
+           $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
+           push @$tunnel_addr, "$nbd_unix_addr:$nbd_unix_addr";
+           push @$sock_addr, $nbd_unix_addr;
+       } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
+           my $drive = $1;
+           my $volid = $2;
+           $target_replicated_volumes->{$volid} = $drive;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
@@ -632,26 +772,41 @@ sub phase2 {
 
     die "unable to detect remote migration address\n" if !$raddr;
 
+    if (scalar(keys %$target_replicated_volumes) != scalar(keys %{$self->{replicated_volumes}})) {
+       die "number of replicated disks on source and target node do not match - target node too old?\n"
+    }
+
     $self->log('info', "start remote tunnel");
 
     if ($migration_type eq 'secure') {
 
        if ($ruri =~ /^unix:/) {
            unlink $raddr;
-           $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
-           $self->{tunnel}->{sock_addr} = $raddr;
+           push @$tunnel_addr, "$raddr:$raddr";
+           $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
+           push @$sock_addr, $raddr;
 
            my $unix_socket_try = 0; # wait for the socket to become ready
-           while (! -S $raddr) {
+           while ($unix_socket_try <= 100) {
                $unix_socket_try++;
-               if ($unix_socket_try > 100) {
-                   $self->{errors} = 1;
-                   $self->finish_tunnel($self->{tunnel});
-                   die "Timeout, migration socket $ruri did not get ready";
+               my $available = 0;
+               foreach my $sock (@$sock_addr) {
+                   if (-S $sock) {
+                       $available++;
+                   }
                }
 
+               if ($available == @$sock_addr) {
+                   last;
+               }
+
                usleep(50000);
            }
+           if ($unix_socket_try > 100) {
+               $self->{errors} = 1;
+               $self->finish_tunnel($self->{tunnel});
+               die "Timeout, migration socket $ruri did not get ready";
+           }
        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
@@ -671,6 +826,7 @@ sub phase2 {
        #fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
     }
+    $self->{tunnel}->{sock_addr} = $sock_addr if (@$sock_addr);
 
     my $start = time();
 
@@ -690,13 +846,20 @@ sub phase2 {
            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $target_drive = PVE::QemuServer::parse_drive($drive, $target->{drivestr});
 
-           my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_drive->{file});
-           my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_drive->{file});
+           my $source_volid = $source_drive->{file};
+           my $target_volid = $target_drive->{file};
+
+           my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_volid);
+           my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_volid);
 
            my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);
+           my $bitmap = $target->{bitmap};
 
            $self->log('info', "$drive: start migration to $nbd_uri");
-           PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
+           PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
+
+           $self->{volume_map}->{$source_volid} = $target_volid;
+           $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
        }
     }
 
@@ -734,12 +897,10 @@ sub phase2 {
     my $migrate_downtime = $defaults->{migrate_downtime};
     $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
-    if (defined($migrate_downtime)) {
-       # migrate-set-parameters expects limit in ms
-       $migrate_downtime *= 1000;
-       $self->log('info', "migration downtime limit: $migrate_downtime ms");
-       $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
-    }
+    # migrate-set-parameters expects limit in ms
+    $migrate_downtime *= 1000;
+    $self->log('info', "migration downtime limit: $migrate_downtime ms");
+    $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
 
     # set cachesize to 10% of the total memory
     my $memory =  $conf->{memory} || $defaults->{memory};
@@ -862,11 +1023,13 @@ sub phase2 {
            if ($downtimecounter > 5) {
                $downtimecounter = 0;
                $migrate_downtime *= 2;
-               $self->log('info', "migrate_set_downtime: $migrate_downtime");
+               $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
                eval {
-                   mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
+                   # migrate-set-parameters does not touch values not
+                   # specified, so this only changes downtime-limit
+                   mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
                };
-               $self->log('info', "migrate_set_downtime error: $@") if $@;
+               $self->log('info', "migrate-set-parameters error: $@") if $@;
            }
        }
 
@@ -904,16 +1067,15 @@ sub phase2_cleanup {
 
     # cleanup resources on target host
     if ($self->{storage_migration}) {
-
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
+    }
 
-       eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
-       if (my $err = $@) {
-           $self->log('err', $err);
-       }
+    eval { $self->cleanup_bitmaps() };
+    if (my $err = $@) {
+       $self->log('err', $err);
     }
 
     my $nodename = PVE::INotify::nodename();
@@ -925,6 +1087,13 @@ sub phase2_cleanup {
        $self->{errors} = 1;
     }
 
+    # cleanup after stopping, otherwise disks might be in-use by target VM!
+    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+    if (my $err = $@) {
+       $self->log('err', $err);
+    }
+
+
     if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
@@ -960,22 +1129,23 @@ sub phase3_cleanup {
 
     my $tunnel = $self->{tunnel};
 
     if ($self->{storage_migration}) {
-       # finish block-job
-       eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };
+       # finish block-job with block-job-cancel, to disconnect source VM from NBD
+       # to avoid it trying to re-establish it. We are in blockjob ready state,
+       # thus, this command changes it to blockjob complete (see qapi docs)
+       eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
 
        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration: $err\n";
-       } else {
-           foreach my $target_drive (keys %{$self->{target_drive}}) {
-               my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
-               $conf->{$target_drive} = PVE::QemuServer::print_drive($drive);
-               PVE::QemuConfig->write_config($vmid, $conf);
-           }
        }
     }
 
+    if ($self->{volume_map}) {
+       PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});
+       PVE::QemuConfig->write_config($vmid, $conf);
+    }
+
     # transfer replication state before move config
     $self->transfer_replication_state() if $self->{replicated_volumes};
 
@@ -1073,6 +1243,9 @@ sub phase3_cleanup {
 
        my $volids = $self->{online_local_volumes};
 
        foreach my $volid (@$volids) {
+           # keep replicated volumes!
+           next if $self->{replicated_volumes}->{$volid};
+
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");