my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
# check if storage is available on both nodes
- my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+ my $targetsid = $self->{opts}->{targetstorage} // $sid;
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
# local volumes which have been copied
$self->{volumes} = [];
- my $res = [];
+ my $override_targetsid = $self->{opts}->{targetstorage};
eval {
next if @{$dl->{$storeid}} == 0;
- my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;
+ my $targetsid = $override_targetsid // $storeid;
# check if storage is available on target node
PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
my ($volid, $attr) = @_;
if ($volid =~ m|^/|) {
+ return if $attr->{shared};
$local_volumes->{$volid}->{ref} = 'config';
die "local file/device\n";
}
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+ my $targetsid = $override_targetsid // $sid;
# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
$self->log('warn', "$err");
}
- if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
- $self->{opts}->{targetstorage} = 1; #use same sid for remote local
- }
-
if ($abort) {
die "can't migrate VM - check log\n";
}
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid}->{ref} eq 'config') {
+ my $targetsid = $override_targetsid // $sid;
+ if ($self->{running} && $local_volumes->{$volid}->{ref} eq 'config') {
push @{$self->{online_local_volumes}}, $volid;
} else {
next if $rep_volumes->{$volid};
push @{$self->{volumes}}, $volid;
my $insecure = $self->{opts}->{migration_type} eq 'insecure';
my $with_snapshots = $local_volumes->{$volid}->{snapshots};
- PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
+ PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
undef, undef, undef, undef, $insecure, $with_snapshots);
}
}
push @$cmd, '--machine', $self->{forcemachine};
}
- if ($self->{opts}->{targetstorage}) {
- push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+ if ($self->{online_local_volumes}) {
+ push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
}
my $spice_port;
die "unable to detect remote migration address\n" if !$raddr;
+ $self->log('info', "start remote tunnel");
+
if ($migration_type eq 'secure') {
- $self->log('info', "start remote tunnel");
if ($ruri =~ /^unix:/) {
unlink $raddr;
} else {
die "unsupported protocol in migration URI: $ruri\n";
}
+ } else {
+ # fork a tunnel for insecure migration too, so commands like resume can be sent quickly
+ $self->{tunnel} = $self->fork_tunnel();
}
my $start = time();
- if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+ if (defined($self->{online_local_volumes})) {
$self->{storage_migration} = 1;
$self->{storage_migration_jobs} = {};
$self->log('info', "starting storage migration");
if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
foreach my $drive (keys %{$self->{target_drive}}){
my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
- $self->log('info', "$drive: start migration to to $nbd_uri");
+ $self->log('info', "$drive: start migration to $nbd_uri");
PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
}
}
# set cachesize to 10% of the total memory
my $memory = $conf->{memory} || $defaults->{memory};
my $cachesize = int($memory * 1048576 / 10);
+ $cachesize = round_powerof2($cachesize);
+
$self->log('info', "set cachesize: $cachesize");
eval {
PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
$self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
my $lstat = 0;
- my $usleep = 2000000;
+ my $usleep = 1000000;
my $i = 0;
my $err_count = 0;
my $lastrem = undef;
my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
- #reduce sleep if remainig memory if lower than the everage transfert
- $usleep = 300000 if $avglstat && $rem < $avglstat;
+ # reduce sleep if remaining memory is lower than the average transfer speed
+ $usleep = 100000 if $avglstat && $rem < $avglstat;
$self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
"remaining ${rem}), total ${total})");
if ($self->{livemigration}) {
if ($self->{storage_migration}) {
- # remove drives referencing the nbd server from source
- # otherwise vm_stop might hang later on
- foreach my $drive (keys %{$self->{target_drive}}){
- PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "device_del", id => $drive);
- }
# stop nbd server on remote vm - requirement for resume since 2.9
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
$self->{errors} = 1;
}
}
+
+ if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ }
}
# close tunnel on successful migration, on error phase2_cleanup closed it
# nothing to do
}
+sub round_powerof2 {
+ return 1 if $_[0] < 2;
+ return 2 << int(log($_[0]-1)/log(2));
+}
+
1;