use PVE::QemuServer;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
+use PVE::ReplicationConfig;
+use PVE::ReplicationState;
+use PVE::Replication;
use base qw(PVE::AbstractMigrate);
if !&$collect_child_process();
}
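+# read a single line from the 'qm mtunnel' process on the target, guarded
+# by a timeout so a dead tunnel cannot block the migration forever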
+sub read_tunnel {
+ my ($self, $tunnel, $timeout) = @_;
+
+ $timeout = 60 if !defined($timeout);
+
+ my $reader = $tunnel->{reader};
+
+ my $output;
+ eval {
+ PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
+ };
+ die "reading from tunnel failed: $@\n" if $@;
+
+ chomp $output;
+
+ return $output;
+}
+
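+# send a command over the tunnel; tunnel protocol version 1 and newer
+# acknowledges every command with 'OK', so wait for that reply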
+sub write_tunnel {
+ my ($self, $tunnel, $timeout, $command) = @_;
+
+ $timeout = 60 if !defined($timeout);
+
+ my $writer = $tunnel->{writer};
+
+ eval {
+ PVE::Tools::run_with_timeout($timeout, sub {
+ print $writer "$command\n";
+ $writer->flush();
+ });
+ };
+ die "writing to tunnel failed: $@\n" if $@;
+
+ if ($tunnel->{version} && $tunnel->{version} >= 1) {
+ my $res = eval { $self->read_tunnel($tunnel, 10); };
+ die "no reply to command '$command': $@\n" if $@;
+
+ if ($res eq 'OK') {
+ return;
+ } else {
+ die "tunnel replied '$res' to command '$command'\n";
+ }
+ }
+}
+
sub fork_tunnel {
my ($self, $tunnel_addr) = @_;
my @localtunnelinfo = defined($tunnel_addr) ? ('-L' , $tunnel_addr ) : ();
- my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel' ];
+ my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel' ];
my $tunnel = $self->fork_command_pipe($cmd);
- my $reader = $tunnel->{reader};
-
- my $helo;
eval {
- PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
+ my $helo = $self->read_tunnel($tunnel, 60);
die "no reply\n" if !$helo;
die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
die "got strange reply from mtunnel ('$helo')\n"
};
my $err = $@;
+ eval {
+ my $ver = $self->read_tunnel($tunnel, 10);
+ if ($ver =~ /^ver (\d+)$/) {
+ $tunnel->{version} = $1;
+ $self->log('info', "ssh tunnel $ver\n");
+ } else {
+ $err = "received invalid tunnel version string '$ver'\n" if !$err;
+ }
+ };
+
if ($err) {
$self->finish_command_pipe($tunnel);
die "can't open migration tunnel - $err";
sub finish_tunnel {
my ($self, $tunnel) = @_;
- my $writer = $tunnel->{writer};
-
- eval {
- PVE::Tools::run_with_timeout(30, sub {
- print $writer "quit\n";
- $writer->flush();
- });
- };
+ eval { $self->write_tunnel($tunnel, 30, 'quit'); };
my $err = $@;
$self->finish_command_pipe($tunnel, 30);
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
# check if storage is available on both nodes
+ my $targetsid = $self->{opts}->{targetstorage} // $sid;
+
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
if ($scfg->{shared}) {
# PVE::Storage::activate_storage checks this for non-shared storages
if !$plugin->check_connection($sid, $scfg);
} else {
# only activate if not shared
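+	    # cloudinit images are regenerated on the target, no need to activate them here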
+ next if ($volid =~ m/vm-\d+-cloudinit/);
push @$need_activate, $volid;
}
}
sub sync_disks {
my ($self, $vmid) = @_;
- $self->log('info', "copying disk images");
-
my $conf = $self->{vmconf};
+ # local volumes which have been copied
$self->{volumes} = [];
- my $res = [];
+ my $override_targetsid = $self->{opts}->{targetstorage};
eval {
- my $volhash = {};
- my $cdromhash = {};
+	# local volumes found, and the origin of each reference
+ my $local_volumes = {};
+ my $local_volumes_errors = {};
+ my $other_errors = [];
+ my $abort = 0;
my $sharedvm = 1;
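+	# collect errors per volume (or globally) instead of dying on the
+	# first one, so a single run reports all problems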
+ my $log_error = sub {
+ my ($msg, $volid) = @_;
+
+ if (defined($volid)) {
+ $local_volumes_errors->{$volid} = $msg;
+ } else {
+ push @$other_errors, $msg;
+ }
+ $abort = 1;
+ };
+
my @sids = PVE::Storage::storage_ids($self->{storecfg});
foreach my $storeid (@sids) {
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
next if @{$dl->{$storeid}} == 0;
+ my $targetsid = $override_targetsid // $storeid;
+
# check if storage is available on target node
- PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
$sharedvm = 0; # there is a non-shared disk
PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volname) = @_;
- $volhash->{$volid} = 1;
+ $local_volumes->{$volid}->{ref} = 'storage';
});
}
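+	# examine one volume referenced by the config or a snapshot; dies with
+	# a short reason, which the caller below logs via $log_error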
my $test_volid = sub {
- my ($volid, $is_cdrom, $snapname) = @_;
+ my ($volid, $attr) = @_;
- return if !$volid;
+ if ($volid =~ m|^/|) {
+ return if $attr->{shared};
+ $local_volumes->{$volid}->{ref} = 'config';
+ die "local file/device\n";
+ }
- die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;
+ my $snaprefs = $attr->{referenced_in_snapshot};
- if ($is_cdrom) {
- die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
+ if ($attr->{cdrom}) {
+ if ($volid eq 'cdrom') {
+ my $msg = "can't migrate local cdrom drive";
+ if (defined($snaprefs) && !$attr->{referenced_in_config}) {
+ my $snapnames = join(', ', sort keys %$snaprefs);
+ $msg .= " (referenced in snapshot - $snapnames)";
+ }
+ &$log_error("$msg\n");
+ return;
+ }
return if $volid eq 'none';
- $cdromhash->{$volid} = 1;
}
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $targetsid = $override_targetsid // $sid;
# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
return if $scfg->{shared};
$sharedvm = 0;
- die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};
+ $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
+
+ if ($attr->{cdrom}) {
+ if ($volid =~ /vm-\d+-cloudinit/) {
+ $local_volumes->{$volid}->{ref} = 'generated';
+ return;
+ }
+ die "local cdrom image\n";
+ }
my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
- die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
+ die "owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $self->{vmid});
- if (defined($snapname)) {
+ my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
+ $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
+ if (defined($snaprefs)) {
		# we cannot migrate snapshots on local storage
# exceptions: 'zfspool' or 'qcow2' files (on directory storage)
- my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
-
- if (($scfg->{type} eq 'zfspool') || ($format eq 'qcow2')) {
- $volhash->{$volid} = 1;
- return;
+ die "online storage migration not possible if snapshot exists\n" if $self->{running};
+ if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
+ die "non-migratable snapshot exists\n";
}
+ }
- die "can't migrate snapshot of local volume '$volid'\n";
+ die "referenced by linked clone(s)\n"
+ if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
+ };
+ PVE::QemuServer::foreach_volid($conf, sub {
+ my ($volid, $attr) = @_;
+ eval { $test_volid->($volid, $attr); };
+ if (my $err = $@) {
+ &$log_error($err, $volid);
+ }
+ });
+
+ foreach my $vol (sort keys %$local_volumes) {
+ my $ref = $local_volumes->{$vol}->{ref};
+ if ($ref eq 'storage') {
+ $self->log('info', "found local disk '$vol' (via storage)\n");
+ } elsif ($ref eq 'config') {
+		&$log_error("can't live migrate attached local disks without the with-local-disks option\n", $vol)
+ if $self->{running} && !$self->{opts}->{"with-local-disks"};
+ $self->log('info', "found local disk '$vol' (in current VM config)\n");
+ } elsif ($ref eq 'snapshot') {
+ $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
+ } elsif ($ref eq 'generated') {
+ $self->log('info', "found generated disk '$vol' (in current VM config)\n");
} else {
- $volhash->{$volid} = 1;
+ $self->log('info', "found local disk '$vol'\n");
}
- };
+ }
- PVE::QemuServer::foreach_volid($conf, $test_volid);
- foreach my $snapname (keys %{$conf->{snapshots}}) {
- PVE::QemuServer::foreach_volid($conf->{snapshots}->{$snapname}, $test_volid, $snapname);
+ foreach my $vol (sort keys %$local_volumes_errors) {
+ $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
+ }
+ foreach my $err (@$other_errors) {
+ $self->log('warn', "$err");
}
- if ($self->{running} && !$sharedvm) {
- die "can't do online migration - VM uses local disks\n";
+ if ($abort) {
+ die "can't migrate VM - check log\n";
}
# additional checks for local storage
- foreach my $volid (keys %$volhash) {
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
}
}
- foreach my $volid (keys %$volhash) {
+ my $rep_volumes;
+
+ $self->log('info', "copying disk images");
+
+ my $rep_cfg = PVE::ReplicationConfig->new();
+
+ if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
+ die "can't live migrate VM with replicated volumes\n" if $self->{running};
+ my $start_time = time();
+ my $logfunc = sub { my ($msg) = @_; $self->log('info', $msg); };
+ $rep_volumes = PVE::Replication::run_replication(
+ 'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
+ $self->{replicated_volumes} = $rep_volumes;
+ }
+
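+    # copy the remaining local volumes; volumes already synced by the
+    # replication run above are skipped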
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- push @{$self->{volumes}}, $volid;
- PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
+ my $targetsid = $override_targetsid // $sid;
+ my $ref = $local_volumes->{$volid}->{ref};
+ if ($self->{running} && $ref eq 'config') {
+ push @{$self->{online_local_volumes}}, $volid;
+ } elsif ($ref eq 'generated') {
+ # skip all generated volumes but queue them for deletion in phase3_cleanup
+ push @{$self->{volumes}}, $volid;
+ next;
+ } else {
+ next if $rep_volumes->{$volid};
+ push @{$self->{volumes}}, $volid;
+ my $opts = $self->{opts};
+ my $insecure = $opts->{migration_type} eq 'insecure';
+ my $with_snapshots = $local_volumes->{$volid}->{snapshots};
+ # use 'migrate' limit for transfer to other node
+ my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$targetsid, $sid], $opts->{bwlimit});
+	    # JSONSchema and get_bandwidth_limit use KiB/s - storage_migrate expects B/s
+ $bwlimit = $bwlimit * 1024 if defined($bwlimit);
+
+ PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
+ undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
+ }
}
};
die "Failed to sync data - $@" if $@;
}
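+# remove disks that an aborted storage migration left behind on the target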
+sub cleanup_remotedisks {
+ my ($self) = @_;
+
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+
+ my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+}
+
sub phase1 {
my ($self, $vmid) = @_;
push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
- # we use TCP only for unsecure migrations as TCP ssh forward tunnels often
- # did appeared to late (they are hard, if not impossible, to check for)
- # secure migration use UNIX sockets now, this *breaks* compatibilty when trying
- # to migrate from new to old but *not* from old to new.
- my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
- my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;
+ my $migration_type = $self->{opts}->{migration_type};
- if (!$secure_migration) {
+ push @$cmd, '--migration_type', $migration_type;
+
+ push @$cmd, '--migration_network', $self->{opts}->{migration_network}
+ if $self->{opts}->{migration_network};
+
+ if ($migration_type eq 'insecure') {
push @$cmd, '--stateuri', 'tcp';
} else {
push @$cmd, '--stateuri', 'unix';
push @$cmd, '--machine', $self->{forcemachine};
}
+ if ($self->{online_local_volumes}) {
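+	# without an explicit target storage, pass '1' so the target allocates
+	# the disks on storages with the same id as on the source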
+ push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
+ }
+
my $spice_port;
# Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
$rport = int($1);
$ruri = "tcp:$raddr:$rport";
}
- elsif ($line =~ m/^spice listens on port (\d+)$/) {
+ elsif ($line =~ m/^spice listens on port (\d+)$/) {
$spice_port = int($1);
}
+ elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
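+		# $1 = host, $2 = port, $3 = NBD export name ('drive-<name>'), $4 = volid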
+ my $volid = $4;
+ my $nbd_uri = "nbd:$1:$2:exportname=$3";
+ my $targetdrive = $3;
+ $targetdrive =~ s/drive-//g;
+
+ $self->{target_drive}->{$targetdrive}->{volid} = $volid;
+ $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
+
+ }
}, errfunc => sub {
my $line = shift;
$self->log('info', $line);
die "unable to detect remote migration address\n" if !$raddr;
- if ($secure_migration) {
- $self->log('info', "start remote tunnel");
+ $self->log('info', "start remote tunnel");
+
+ if ($migration_type eq 'secure') {
if ($ruri =~ /^unix:/) {
unlink $raddr;
} else {
die "unsupported protocol in migration URI: $ruri\n";
}
+ } else {
+	# fork a tunnel for insecure migration too, so commands like resume can be sent quickly
+ $self->{tunnel} = $self->fork_tunnel();
}
my $start = time();
+
+ my $opt_bwlimit = $self->{opts}->{bwlimit};
+
+ if (defined($self->{online_local_volumes})) {
+ $self->{storage_migration} = 1;
+ $self->{storage_migration_jobs} = {};
+ $self->log('info', "starting storage migration");
+
+ die "The number of local disks does not match between the source and the destination.\n"
+ if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
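+	# mirror each local disk onto the NBD export the target advertised above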
+ foreach my $drive (keys %{$self->{target_drive}}){
+ my $target = $self->{target_drive}->{$drive};
+ my $nbd_uri = $target->{nbd_uri};
+ my $source_sid = PVE::Storage::Plugin::parse_volume_id($conf->{$drive});
+ my $target_sid = PVE::Storage::Plugin::parse_volume_id($target->{volid});
+ my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$source_sid, $target_sid], $opt_bwlimit);
+
+ $self->log('info', "$drive: start migration to $nbd_uri");
+ PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
+ }
+ }
+
$self->log('info', "starting online/live migration on $ruri");
$self->{livemigration} = 1;
# load_defaults
my $defaults = PVE::QemuServer::load_defaults();
- # always set migrate speed (overwrite kvm default of 32m)
- # we set a very hight default of 8192m which is basically unlimited
- my $migrate_speed = $defaults->{migrate_speed} || 8192;
- $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
- $migrate_speed = $migrate_speed * 1048576;
+ # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
+ # migrate_speed parameter in qm.conf - take the lower of the two.
+ my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', undef, $opt_bwlimit) // 0;
+ my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
+ # migrate_speed is in MB/s, bwlimit in KB/s
+ $migrate_speed *= 1024;
+
+ $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
+
+    # always set a migrate speed (overwrites the kvm default of 32m); we use a
+    # very high default of 8192m, which is basically unlimited
+ $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;
+
+ # qmp takes migrate_speed in B/s.
+ $migrate_speed *= 1024;
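+    # e.g. bwlimit 100000 (KiB/s) and migrate_speed 200 (MB/s):
+    # min(100000, 204800) KiB/s => 102400000 B/s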
$self->log('info', "migrate_set_speed: $migrate_speed");
eval {
PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
};
warn $@ if $@;
- #set cachesize 10% of the total memory
- my $cachesize = int($conf->{memory}*1048576/10);
+ # set cachesize to 10% of the total memory
+ my $memory = $conf->{memory} || $defaults->{memory};
+ my $cachesize = int($memory * 1048576 / 10);
+ $cachesize = round_powerof2($cachesize);
+
$self->log('info', "set cachesize: $cachesize");
eval {
PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
- my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
+ my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
$self->log('info', "spice client_migrate_info");
$self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
my $lstat = 0;
- my $usleep = 2000000;
+ my $usleep = 1000000;
my $i = 0;
my $err_count = 0;
my $lastrem = undef;
if ($stat->{status} eq 'completed') {
my $delay = time() - $start;
if ($delay > 0) {
- my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
+ my $mbps = sprintf "%.2f", $memory / $delay;
my $downtime = $stat->{downtime} || 0;
$self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
}
my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
- #reduce sleep if remainig memory if lower than the everage transfert
- $usleep = 300000 if $avglstat && $rem < $avglstat;
+	    # reduce sleep if remaining memory is lower than the average transfer speed
+ $usleep = 100000 if $avglstat && $rem < $avglstat;
$self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
"remaining ${rem}), total ${total})");
die "unable to parse migration status '$stat->{status}' - aborting\n";
}
}
-
- # just to be sure that the tunnel gets closed on successful migration, on error
- # phase2_cleanup closes it *after* stopping the remote waiting VM
- if (!$self->{errors} && $self->{tunnel}) {
- eval { finish_tunnel($self, $self->{tunnel}); };
- if (my $err = $@) {
- $self->log('err', $err);
- $self->{errors} = 1;
- }
- }
}
sub phase2_cleanup {
}
    # cleanup resources on target host
+ if ($self->{storage_migration}) {
+
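+	# cancel the still running block jobs, then free the partially
+	# written disks on the target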
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ }
+
+ eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ }
+ }
+
my $nodename = PVE::INotify::nodename();
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
my $conf = $self->{vmconf};
return if $self->{phase2errors};
+ my $tunnel = $self->{tunnel};
+
+ if ($self->{storage_migration}) {
+ # finish block-job
+ eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };
+
+ if (my $err = $@) {
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+ eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+	    die "Failed to complete storage migration\n";
+ } else {
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ }
+ }
+
+    # transfer replication state before moving the config
+ $self->transfer_replication_state() if $self->{replicated_volumes};
+
# move config to remote node
my $conffile = PVE::QemuConfig->config_file($vmid);
my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
if !rename($conffile, $newconffile);
+ $self->switch_replication_job_target() if $self->{replicated_volumes};
+
if ($self->{livemigration}) {
- # now that config file is move, we can resume vm on target if livemigrate
- my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
- eval{ PVE::Tools::run_command($cmd, outfunc => sub {},
- errfunc => sub {
- my $line = shift;
- $self->log('err', $line);
- });
- };
+ if ($self->{storage_migration}) {
+	    # stop the nbd server on the remote vm - required for resume since QEMU 2.9
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+
+ # config moved and nbd server stopped - now we can resume vm on target
+ if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
+ eval {
+ $self->write_tunnel($tunnel, 30, "resume $vmid");
+ };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ } else {
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
+ my $logf = sub {
+ my $line = shift;
+ $self->log('err', $line);
+ };
+ eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+
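+	# trim the freshly mirrored disks through the guest agent to give
+	# thin-provisioned storage the freed blocks back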
+ if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ }
+ }
+
+    # close the tunnel on successful migration; on error, phase2_cleanup closes it
+ if ($tunnel) {
+ eval { finish_tunnel($self, $tunnel); };
if (my $err = $@) {
$self->log('err', $err);
$self->{errors} = 1;
}
eval {
-
my $timer = 0;
if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
$self->log('info', "Waiting for spice server migration");
last if $timer > 50;
$timer ++;
usleep(200000);
- }
+ }
}
};
$self->{errors} = 1;
}
+    if ($self->{storage_migration}) {
+ # destroy local copies
+ my $volids = $self->{online_local_volumes};
+
+ foreach my $volid (@$volids) {
+ eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
+ if (my $err = $@) {
+ $self->log('err', "removing local copy of '$volid' failed - $err");
+ $self->{errors} = 1;
+ last if $err =~ /^interrupted by signal$/;
+ }
+ }
+
+ }
+
# clear migrate lock
my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
# nothing to do
}
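+# round up to the next power of two; QEMU expects the xbzrle cache size to be
+# a power of two (e.g. round_powerof2(5) == 8, round_powerof2(8) == 8)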
+sub round_powerof2 {
+ return 1 if $_[0] < 2;
+ return 2 << int(log($_[0]-1)/log(2));
+}
+
1;