use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
+use PVE::ReplicationTools;
use PVE::QemuServer;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
# check if storage is available on both nodes
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
if ($scfg->{shared}) {
# PVE::Storage::activate_storage checks this for non-shared storages
sub sync_disks {
my ($self, $vmid) = @_;
- $self->log('info', "copying disk images");
-
my $conf = $self->{vmconf};
+ # local volumes which have been copied
$self->{volumes} = [];
my $res = [];
eval {
- my $volhash = {};
+ # found local volumes and their origin
+ my $local_volumes = {};
+ my $local_volumes_errors = {};
+ my $other_errors = [];
+ my $abort = 0;
my $sharedvm = 1;
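+ # collect errors per volume instead of dying on the first one, so the log lists every problem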
+ my $log_error = sub {
+ my ($msg, $volid) = @_;
+
+ if (defined($volid)) {
+ $local_volumes_errors->{$volid} = $msg;
+ } else {
+ push @$other_errors, $msg;
+ }
+ $abort = 1;
+ };
+
my @sids = PVE::Storage::storage_ids($self->{storecfg});
foreach my $storeid (@sids) {
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
next if @{$dl->{$storeid}} == 0;
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;
+
# check if storage is available on target node
- PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
$sharedvm = 0; # there is a non-shared disk
PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volname) = @_;
- $volhash->{$volid} = 'storage';
+ $local_volumes->{$volid} = 'storage';
});
}
return if !$volid;
- die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;
+ if ($volid =~ m|^/|) {
+ $local_volumes->{$volid} = 'config';
+ die "local file/device\n";
+ }
if ($is_cdrom) {
- die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
+ if ($volid eq 'cdrom') {
+ my $msg = "can't migrate local cdrom drive";
+ $msg .= " (referenced in snapshot '$snapname')"
+ if defined($snapname);
+
+ &$log_error("$msg\n");
+ return;
+ }
return if $volid eq 'none';
}
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
return if $scfg->{shared};
$sharedvm = 0;
- $volhash->{$volid} = defined($snapname) ? 'snapshot' : 'config';
+ $local_volumes->{$volid} = defined($snapname) ? 'snapshot' : 'config';
- die "can't migrate local cdrom '$volid'\n" if $is_cdrom;
+ die "local cdrom image\n" if $is_cdrom;
my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
- die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
+ die "owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $self->{vmid});
if (defined($snapname)) {
# exceptions: 'zfspool' or 'qcow2' files (on directory storage)
my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
-
- if (($scfg->{type} eq 'zfspool') || ($format eq 'qcow2')) {
- return;
+ die "online storage migration not possible if snapshot exists\n" if $self->{running};
+ if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
+ die "non-migratable snapshot exists\n";
}
-
- die "can't migrate snapshot of local volume '$volid'\n";
-
}
+
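+ # a base volume that is still used by linked clones has to stay on the source node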
+ die "referenced by linked clone(s)\n"
+ if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
};
my $test_drive = sub {
my ($ds, $drive, $snapname) = @_;
- &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
+ eval {
+ &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
+ };
+
+ &$log_error($@, $drive->{file}) if $@;
};
- PVE::QemuServer::foreach_drive($conf, $test_drive);
foreach my $snapname (keys %{$conf->{snapshots}}) {
- &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
- if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
+ eval {
+ &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
+ if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
+ };
+ &$log_error($@, $conf->{snapshots}->{$snapname}->{'vmstate'}) if $@;
+
PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
}
+ PVE::QemuServer::foreach_drive($conf, $test_drive);
- foreach my $vol (sort keys %$volhash) {
- if ($volhash->{$vol} eq 'storage') {
+ foreach my $vol (sort keys %$local_volumes) {
+ if ($local_volumes->{$vol} eq 'storage') {
$self->log('info', "found local disk '$vol' (via storage)\n");
- } elsif ($volhash->{$vol} eq 'config') {
+ } elsif ($local_volumes->{$vol} eq 'config') {
+ die "can't live migrate attached local disks without with-local-disks option\n" if $self->{running} && !$self->{opts}->{"with-local-disks"};
$self->log('info', "found local disk '$vol' (in current VM config)\n");
- } elsif ($volhash->{$vol} eq 'snapshot') {
+ } elsif ($local_volumes->{$vol} eq 'snapshot') {
$self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
} else {
$self->log('info', "found local disk '$vol'\n");
}
}
- if ($self->{running} && !$sharedvm) {
- die "can't do online migration - VM uses local disks\n";
+ foreach my $vol (sort keys %$local_volumes_errors) {
+ $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
+ }
+ foreach my $err (@$other_errors) {
+ $self->log('warn', "$err");
+ }
+
+ if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
+ $self->{opts}->{targetstorage} = 1; # use the same storage id on the target node
+ }
+
+ if ($abort) {
+ die "can't migrate VM - check log\n";
}
# additional checks for local storage
- foreach my $volid (keys %$volhash) {
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
}
}
- foreach my $volid (keys %$volhash) {
+ $self->log('info', "copying disk images");
+
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- push @{$self->{volumes}}, $volid;
- PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
+ if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
+ push @{$self->{online_local_volumes}}, $volid;
+ } else {
+ push @{$self->{volumes}}, $volid;
+ PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
+ }
}
};
die "Failed to sync data - $@" if $@;
}
+sub cleanup_remotedisks {
+ my ($self) = @_;
+
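+ # free the volumes that were already allocated on the target node for storage migration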
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+
+ my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+}
+
sub phase1 {
my ($self, $vmid) = @_;
sync_disks($self, $vmid);
+ # set a new replica_target if we migrate to the current replica target
+ if ($conf->{replica}) {
+ if ($conf->{replica_target} eq $self->{node}) {
+ $self->log('info', "changing replica target to node '$self->{opts}->{node}'");
+ $conf->{replica_target} = $self->{opts}->{node};
+ }
+
+ PVE::ReplicationTools::job_remove($vmid);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
};
sub phase1_cleanup {
# secure migration uses UNIX sockets now, this *breaks* compatibility when trying
# to migrate from new to old but *not* from old to new.
my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
- my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;
- if (!$secure_migration) {
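+ # prefer an explicit migration type from the options, then the datacenter.cfg setting, default to 'secure'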
+ my $migration_type = 'secure';
+ if (defined($self->{opts}->{migration_type})) {
+ $migration_type = $self->{opts}->{migration_type};
+ } elsif (defined($datacenterconf->{migration}->{type})) {
+ $migration_type = $datacenterconf->{migration}->{type};
+ }
+
+ push @$cmd, '--migration_type', $migration_type;
+
+ push @$cmd, '--migration_network', $self->{opts}->{migration_network}
+ if $self->{opts}->{migration_network};
+
+ if ($migration_type eq 'insecure') {
push @$cmd, '--stateuri', 'tcp';
} else {
push @$cmd, '--stateuri', 'unix';
push @$cmd, '--machine', $self->{forcemachine};
}
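+ # pass the target storage to the remote 'qm start' so it can allocate the local disks there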
+ if ($self->{opts}->{targetstorage}) {
+ push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+ }
+
my $spice_port;
# Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
elsif ($line =~ m/^spice listens on port (\d+)$/) {
$spice_port = int($1);
}
+ elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
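+ # remember the volid and NBD URI of each disk allocated on the target, used later by drive-mirror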
+ my $volid = $4;
+ my $nbd_uri = "nbd:$1:$2:exportname=$3";
+ my $targetdrive = $3;
+ $targetdrive =~ s/drive-//g;
+
+ $self->{target_drive}->{$targetdrive}->{volid} = $volid;
+ $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
+
+ }
}, errfunc => sub {
my $line = shift;
$self->log('info', $line);
die "unable to detect remote migration address\n" if !$raddr;
- if ($secure_migration) {
+ if ($migration_type eq 'secure') {
$self->log('info', "start remote tunnel");
if ($ruri =~ /^unix:/) {
}
my $start = time();
+
+ if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+ $self->{storage_migration} = 1;
+ $self->{storage_migration_jobs} = {};
+ $self->log('info', "starting storage migration");
+
+ die "The number of local disks does not match between the source and the destination.\n"
+ if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
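+ # mirror each local disk to its NBD export on the target node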
+ foreach my $drive (keys %{$self->{target_drive}}){
+ my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
+ $self->log('info', "$drive: start migration to to $nbd_uri");
+ PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
+ }
+ }
+
$self->log('info', "starting online/live migration on $ruri");
$self->{livemigration} = 1;
}
# cleanup resources on target host
+ if ($self->{storage_migration}) {
+
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ }
+
+ eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ }
+ }
+
my $nodename = PVE::INotify::nodename();
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
my $volids = $self->{volumes};
return if $self->{phase2errors};
+ my $synced_volumes = {};
+ $synced_volumes = PVE::ReplicationTools::get_syncable_guestdisks($self->{vmconf}, 'qemu')
+ if $self->{vmconf}->{replica};
+
# destroy local copies
foreach my $volid (@$volids) {
+ # do not destroy synced volumes if the new replica target is the local host
+ next if $self->{vmconf}->{replica} &&
+ defined($synced_volumes->{$volid}) &&
+ $self->{vmconf}->{replica_target} eq $self->{opts}->{node};
+
eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
if (my $err = $@) {
$self->log('err', "removing local copy of '$volid' failed - $err");
my $conf = $self->{vmconf};
return if $self->{phase2errors};
+ if ($self->{storage_migration}) {
+ # finish block-job
+ eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };
+
+ if (my $err = $@) {
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+ eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+ die "Failed to completed storage migration\n";
+ } else {
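+ # update the config with the volumes newly allocated on the target node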
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
+ }
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ }
+
# move config to remote node
my $conffile = PVE::QemuConfig->config_file($vmid);
my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
if !rename($conffile, $newconffile);
if ($self->{livemigration}) {
- # now that config file is move, we can resume vm on target if livemigrate
+ if ($self->{storage_migration}) {
+ # remove drives referencing the nbd server from source
+ # otherwise vm_stop might hang later on
+ foreach my $drive (keys %{$self->{target_drive}}){
+ PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "device_del", id => $drive);
+ }
+ # stop nbd server on remote vm - requirement for resume since 2.9
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+ # config moved and nbd server stopped - now we can resume vm on target
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
eval{ PVE::Tools::run_command($cmd, outfunc => sub {},
errfunc => sub {
}
eval {
-
my $timer = 0;
if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
$self->log('info', "Waiting for spice server migration");
$self->{errors} = 1;
}
+ if ($self->{storage_migration}) {
+ # destroy local copies
+ my $volids = $self->{online_local_volumes};
+
+ foreach my $volid (@$volids) {
+ eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
+ if (my $err = $@) {
+ $self->log('err', "removing local copy of '$volid' failed - $err");
+ $self->{errors} = 1;
+ last if $err =~ /^interrupted by signal$/;
+ }
+ }
+
+ }
+
# clear migrate lock
my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
+
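+ # reactivate replication for the VM on the target node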
+ if ($self->{vmconf}->{replica}) {
+ my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'set', $vmid, '--replica'];
+ $self->cmd_logerr($cmd, errmsg => "failed to activate replica");
+ }
}
sub final_cleanup {