use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
+use POSIX qw( WNOHANG );
+use Time::HiRes qw( usleep );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
sub finish_command_pipe {
my ($self, $cmdpipe, $timeout) = @_;
+ my $cpid = $cmdpipe->{pid};
+ return if !defined($cpid);
+
my $writer = $cmdpipe->{writer};
my $reader = $cmdpipe->{reader};
$writer->close();
$reader->close();
- my $cpid = $cmdpipe->{pid};
+ my $collect_child_process = sub {
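+ # reap without blocking: waitpid() with WNOHANG returns the child's PID once it has exited, 0 while it is still running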
+ my $res = waitpid($cpid, WNOHANG);
+ if (defined($res) && ($res == $cpid)) {
+ delete $cmdpipe->{pid};
+ return 1;
+ } else {
+ return 0;
+ }
+ };
if ($timeout) {
for (my $i = 0; $i < $timeout; $i++) {
- return if !PVE::ProcFSTools::check_process_running($cpid);
+ return if &$collect_child_process();
sleep(1);
}
}
# wait again
for (my $i = 0; $i < 10; $i++) {
- return if !PVE::ProcFSTools::check_process_running($cpid);
+ return if &$collect_child_process();
sleep(1);
}
$self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
kill 9, $cpid;
sleep 1;
+
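+ # if the child still cannot be reaped it will stay a zombie until we exit; just log it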
+ $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
+ if !&$collect_child_process();
}
sub fork_tunnel {
- my ($self, $nodeip, $lport, $rport) = @_;
+ my ($self, $tunnel_addr) = @_;
- my @localtunnelinfo = $lport ? ('-L' , "$lport:localhost:$rport" ) : ();
+ my @localtunnelinfo = defined($tunnel_addr) ? ('-L' , $tunnel_addr ) : ();
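+ # ExitOnForwardFailure makes ssh exit when the -L forward cannot be established, instead of silently running without a tunnel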
- my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo, 'qm', 'mtunnel' ];
+ my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel' ];
my $tunnel = $self->fork_command_pipe($cmd);
$self->finish_command_pipe($tunnel, 30);
+ if ($tunnel->{sock_addr}) {
+ # ssh does not clean up on local host
+ my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
+ PVE::Tools::run_command($cmd);
+
+ # .. and just to be sure check on remote side
+ unshift @{$cmd}, @{$self->{rem_ssh}};
+ PVE::Tools::run_command($cmd);
+ }
+
die $err if $err;
}
sub lock_vm {
my ($self, $vmid, $code, @param) = @_;
- return PVE::QemuServer::lock_config($vmid, $code, @param);
+ return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
sub prepare {
$self->{storecfg} = PVE::Storage::config();
- # test is VM exist
- my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);
+ # test if VM exists
+ my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
my $running = 0;
if (my $pid = PVE::QemuServer::check_running($vmid)) {
- die "cant migrate running VM without --online\n" if !$online;
+ die "can't migrate running VM without --online\n" if !$online;
$running = $pid;
- $self->{forcemachine} = PVE::QemuServer::get_current_qemu_machine($vmid);
+
+ $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
+
}
if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
}
}
- # activate volumes
my $vollist = PVE::QemuServer::get_vm_volumes($conf);
- PVE::Storage::activate_volumes($self->{storecfg}, $vollist);
- # fixme: check if storage is available on both nodes
+ my $need_activate = [];
+ foreach my $volid (@$vollist) {
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+
+ # check if storage is available on both nodes
+ my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
+ PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+
+ if ($scfg->{shared}) {
+ # PVE::Storage::activate_storage checks this for non-shared storages
+ my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+ warn "Used shared storage '$sid' is not online on source node!\n"
+ if !$plugin->check_connection($sid, $scfg);
+ } else {
+ # only activate if not shared
+ push @$need_activate, $volid;
+ }
+ }
+
+ # activate volumes
+ PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);
# test ssh connection
my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
my $conf = $self->{vmconf};
+ # local volumes which have been copied
$self->{volumes} = [];
my $res = [];
eval {
- my $volhash = {};
- my $cdromhash = {};
+ # found local volumes and their origin
+ my $local_volumes = {};
+ my $local_volumes_errors = {};
+ my $other_errors = [];
+ my $abort = 0;
my $sharedvm = 1;
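+ # collect all blocking volumes and errors first and only abort at the end,
+ # so the log shows every problem in one run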
+ my $log_error = sub {
+ my ($msg, $volid) = @_;
+
+ if (defined($volid)) {
+ $local_volumes_errors->{$volid} = $msg;
+ } else {
+ push @$other_errors, $msg;
+ }
+ $abort = 1;
+ };
+
my @sids = PVE::Storage::storage_ids($self->{storecfg});
- foreach my $storeid (@sids) {
+ foreach my $storeid (@sids) {
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
- next if $scfg->{shared};
+ next if $scfg->{shared};
next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);
- # get list from PVE::Storage (for unused volumes)
- my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
- PVE::Storage::foreach_volid($dl, sub {
- my ($volid, $sid, $volname) = @_;
+ # get list from PVE::Storage (for unused volumes)
+ my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
- # check if storage is available on target node
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ next if @{$dl->{$storeid}} == 0;
- $volhash->{$volid} = 1;
- $sharedvm = 0; # there is a non-shared disk
- });
- }
+ # check if storage is available on target node
+ PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
+ $sharedvm = 0; # there is a non-shared disk
- # and add used, owned/non-shared disks (just to be sure we have all)
+ PVE::Storage::foreach_volid($dl, sub {
+ my ($volid, $sid, $volname) = @_;
- PVE::QemuServer::foreach_volid($conf, sub {
- my ($volid, $is_cdrom) = @_;
+ $local_volumes->{$volid} = 'storage';
+ });
+ }
+
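+ # test a single volume; dies with a short reason, which callers catch and feed to $log_error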
+ my $test_volid = sub {
+ my ($volid, $is_cdrom, $snapname) = @_;
return if !$volid;
- die "cant migrate local file/device '$volid'\n" if $volid =~ m|^/|;
+ if ($volid =~ m|^/|) {
+ $local_volumes->{$volid} = 'config';
+ die "local file/device\n";
+ }
if ($is_cdrom) {
- die "cant migrate local cdrom drive\n" if $volid eq 'cdrom';
+ if ($volid eq 'cdrom') {
+ my $msg = "can't migrate local cdrom drive";
+ $msg .= " (referenced in snapshot '$snapname')"
+ if defined($snapname);
+
+ &$log_error("$msg\n");
+ return;
+ }
return if $volid eq 'none';
- $cdromhash->{$volid} = 1;
}
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
return if $scfg->{shared};
- die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};
-
$sharedvm = 0;
+ $local_volumes->{$volid} = defined($snapname) ? 'snapshot' : 'config';
+
+ die "local cdrom image\n" if $is_cdrom;
+
my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
- die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
+ die "owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $self->{vmid});
- $volhash->{$volid} = 1;
- });
+ if (defined($snapname)) {
+ # we cannot migrate snapshots on local storage
+ # exceptions: 'zfspool' or 'qcow2' files (on directory storage)
+
+ my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
+
+ if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
+ die "non-migratable snapshot exists\n";
+ }
+ }
+
+ die "referenced by linked clone(s)\n"
+ if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
+ };
+
+ my $test_drive = sub {
+ my ($ds, $drive, $snapname) = @_;
+
+ eval {
+ &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
+ };
+
+ &$log_error($@, $drive->{file}) if $@;
+ };
+
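+ # snapshots may reference volumes (and a vmstate file) that no longer appear
+ # in the current config, so test them as well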
+ foreach my $snapname (keys %{$conf->{snapshots}}) {
+ eval {
+ &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
+ if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
+ };
+ &$log_error($@, $conf->{snapshots}->{$snapname}->{'vmstate'}) if $@;
+
+ PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
+ }
+ PVE::QemuServer::foreach_drive($conf, $test_drive);
+
+ foreach my $vol (sort keys %$local_volumes) {
+ if ($local_volumes->{$vol} eq 'storage') {
+ $self->log('info', "found local disk '$vol' (via storage)\n");
+ } elsif ($local_volumes->{$vol} eq 'config') {
+ $self->log('info', "found local disk '$vol' (in current VM config)\n");
+ } elsif ($local_volumes->{$vol} eq 'snapshot') {
+ $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
+ } else {
+ $self->log('info', "found local disk '$vol'\n");
+ }
+ }
+
+ foreach my $vol (sort keys %$local_volumes_errors) {
+ $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
+ }
+ foreach my $err (@$other_errors) {
+ $self->log('warn', "$err");
+ }
if ($self->{running} && !$sharedvm) {
die "can't do online migration - VM uses local disks\n";
}
- # do some checks first
- foreach my $volid (keys %$volhash) {
+ if ($abort) {
+ die "can't migrate VM - check log\n";
+ }
+
+ # additional checks for local storage
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
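+ # presumably the set of storage types that storage_migrate() below can handle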
+ my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
+ ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');
+
die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
- if (!($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm));
+ if !$migratable;
- # if file, check if a backing file exist
- if (!($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm)) {
- my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
- die "can't migrate '$volid' as it's a clone of '$parent'" if $parent;
+ # image is a linked clone on local storage, so we can't migrate it.
+ if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
+ die "can't migrate '$volid' as it's a clone of '$basename'";
}
}
- foreach my $volid (keys %$volhash) {
+ foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$self->{volumes}}, $volid;
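# storage_migrate copies the volume to the same storage id on the target node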
PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
# set migrate lock in config file
$conf->{lock} = 'migrate';
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
sync_disks($self, $vmid);
my $conf = $self->{vmconf};
delete $conf->{lock};
- eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
+ eval { PVE::QemuConfig->write_config($vmid, $conf) };
if (my $err = $@) {
$self->log('err', $err);
}
my $raddr;
my $rport;
+ my $ruri; # the whole migration dst. URI (protocol:address[:port])
my $nodename = PVE::INotify::nodename();
## start on remote node
$spice_ticket = $res->{ticket};
}
- push @$cmd , 'qm', 'start', $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename;
+ push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
+
+ # we use TCP only for insecure migrations, as TCP ssh forward tunnels often
+ # appeared too late (they are hard, if not impossible, to check for).
+ # secure migrations use UNIX sockets now; this *breaks* compatibility when
+ # migrating from new to old, but *not* from old to new.
+ my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
+
+ my $migration_type = 'secure';
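+ # a per-migration option overrides the datacenter-wide default from datacenter.cfg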
+ if (defined($self->{opts}->{migration_type})) {
+ $migration_type = $self->{opts}->{migration_type};
+ } elsif (defined($datacenterconf->{migration}->{type})) {
+ $migration_type = $datacenterconf->{migration}->{type};
+ }
+
+ push @$cmd, '--migration_type', $migration_type;
+
+ push @$cmd, '--migration_network', $self->{opts}->{migration_network}
+ if $self->{opts}->{migration_network};
+
+ if ($migration_type eq 'insecure') {
+ push @$cmd, '--stateuri', 'tcp';
+ } else {
+ push @$cmd, '--stateuri', 'unix';
+ }
if ($self->{forcemachine}) {
push @$cmd, '--machine', $self->{forcemachine};
PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
my $line = shift;
- if ($line =~ m/^migration listens on tcp:([\d\.]+|localhost):(\d+)$/) {
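+ # accept "localhost", a plain IPv4 address, or a bracketed IPv6 address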
+ if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
$raddr = $1;
$rport = int($2);
+ $ruri = "tcp:$raddr:$rport";
+ }
+ elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
+ $raddr = $1;
+ die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
+ $ruri = "unix:$raddr";
}
elsif ($line =~ m/^migration listens on port (\d+)$/) {
$raddr = "localhost";
$rport = int($1);
+ $ruri = "tcp:$raddr:$rport";
}
elsif ($line =~ m/^spice listens on port (\d+)$/) {
$spice_port = int($1);
die "unable to detect remote migration address\n" if !$raddr;
- ## create tunnel to remote port
- $self->log('info', "starting ssh migration tunnel");
- my $pfamily = PVE::Tools::get_host_address_family($nodename);
- my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port($pfamily) : undef;
- $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);
+ if ($migration_type eq 'secure') {
+ $self->log('info', "start remote tunnel");
+
+ if ($ruri =~ /^unix:/) {
+ unlink $raddr;
+ $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
+ $self->{tunnel}->{sock_addr} = $raddr;
+
+ my $unix_socket_try = 0; # wait for the socket to become ready
+ while (! -S $raddr) {
+ $unix_socket_try++;
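+ # give up after 100 * 50ms = 5 seconds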
+ if ($unix_socket_try > 100) {
+ $self->{errors} = 1;
+ $self->finish_tunnel($self->{tunnel});
+ die "Timeout, migration socket $ruri did not get ready";
+ }
+
+ usleep(50000);
+ }
+
+ } elsif ($ruri =~ /^tcp:/) {
+ my $tunnel_addr;
+ if ($raddr eq "localhost") {
+ # for backwards compatibility with older qemu-server versions
+ my $pfamily = PVE::Tools::get_host_address_family($nodename);
+ my $lport = PVE::Tools::next_migrate_port($pfamily);
+ $tunnel_addr = "$lport:localhost:$rport";
+ }
+
+ $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
+
+ } else {
+ die "unsupported protocol in migration URI: $ruri\n";
+ }
+ }
my $start = time();
- $self->log('info', "starting online/live migration on $raddr:$rport");
+ $self->log('info', "starting online/live migration on $ruri");
$self->{livemigration} = 1;
# load_defaults
$self->log('info', "migrate_set_downtime error: $@") if $@;
}
+ $self->log('info', "set migration_caps");
eval {
PVE::QemuServer::set_migration_caps($vmid);
};
# set migrate cache size to 10% of the VM's memory (MiB -> bytes)
my $cachesize = int($conf->{memory}*1048576/10);
+ $self->log('info', "set cachesize: $cachesize");
eval {
- PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
+ PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
};
-
+ $self->log('info', "migrate-set-cache-size error: $@") if $@;
+
if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
}
+ $self->log('info', "start migrate command to $ruri");
eval {
- PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport");
+ PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
};
my $merr = $@;
- $self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr;
+ $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
my $lstat = 0;
my $usleep = 2000000;
if (my $err = $@) {
$err_count++;
warn "query migrate failed: $err\n";
+ $self->log('info', "query migrate failed: $err");
if ($err_count <= 5) {
usleep(1000000);
next;
}
die "too many query migrate failures - aborting\n";
}
- if ($stat->{status} =~ m/^(setup)$/im) {
+ if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
sleep(1);
next;
}
- if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
+ if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
$merr = undef;
$err_count = 0;
if ($stat->{status} eq 'completed') {
}
if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
+ $self->log('info', "migration status error: $stat->{status}");
die "aborting\n"
}
die "unable to parse migration status '$stat->{status}' - aborting\n";
}
}
+
+ # make sure the tunnel gets closed on successful migration; on error,
+ # phase2_cleanup closes it *after* stopping the remote VM that is still waiting
+ if (!$self->{errors} && $self->{tunnel}) {
+ eval { finish_tunnel($self, $self->{tunnel}); };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
}
sub phase2_cleanup {
my $conf = $self->{vmconf};
delete $conf->{lock};
- eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
+ eval { PVE::QemuConfig->write_config($vmid, $conf) };
if (my $err = $@) {
$self->log('err', $err);
}
$self->log('err', $err);
$self->{errors} = 1;
}
+
+ if ($self->{tunnel}) {
+ eval { finish_tunnel($self, $self->{tunnel}); };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
}
sub phase3 {
return if $self->{phase2errors};
# move config to remote node
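# the rename works across nodes because configs live on the shared cluster filesystem (pmxcfs)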
- my $conffile = PVE::QemuServer::config_file($vmid);
- my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});
+ my $conffile = PVE::QemuConfig->config_file($vmid);
+ my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
if !rename($conffile, $newconffile);
if ($self->{livemigration}) {
# now that the config file has been moved, we can resume the VM on the target if live-migrating
- my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
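+ # --nocheck: the config was just moved, so the target node's cluster view may not list it as owner yet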
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
eval{ PVE::Tools::run_command($cmd, outfunc => sub {},
errfunc => sub {
my $line = shift;
$self->{errors} = 1;
}
- if ($self->{tunnel}) {
- eval { finish_tunnel($self, $self->{tunnel}); };
- if (my $err = $@) {
- $self->log('err', $err);
- $self->{errors} = 1;
- }
- }
-
# always deactivate volumes - avoid lvm LVs to be active on several nodes
eval {
my $vollist = PVE::QemuServer::get_vm_volumes($conf);