use PVE::Storage;
use PVE::Tools;
+use PVE::QemuConfig;
+use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
}
sub fork_tunnel {
- my ($self, $tunnel_addr) = @_;
+ my ($self, $ssh_forward_info) = @_;
my @localtunnelinfo = ();
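+ # build the ssh '-L' arguments, one per requested forward spec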
- foreach my $addr (@$tunnel_addr) {
+ foreach my $addr (@$ssh_forward_info) {
push @localtunnelinfo, '-L', $addr;
}
$self->finish_command_pipe($tunnel, 30);
- if ($tunnel->{sock_addr}) {
+ if (my $unix_sockets = $tunnel->{unix_sockets}) {
# ssh does not clean up on local host
- my $cmd = ['rm', '-f', @{$tunnel->{sock_addr}}];
+ my $cmd = ['rm', '-f', @$unix_sockets];
PVE::Tools::run_command($cmd);
# ... and just to be sure, check on the remote side
$self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
+ # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
+ # Since the parameter itself contains no reference to a custom model,
+ # this makes migration independent of changes to "cpu-models.conf".
+ if ($conf->{cpu}) {
+ my $cpuconf = PVE::QemuServer::CPUConfig::parse_cpu_conf_basic($conf->{cpu});
+ if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
+ $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
+ }
+ }
}
+
my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
if (scalar @$loc_res) {
if ($self->{running} || !$self->{opts}->{force}) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
# check if storage is available on both nodes
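+ # resolve the target storage through the optional storage map, falling back to the source storage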
- my $targetsid = $self->{opts}->{targetstorage} // $sid;
+ my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
my $conf = $self->{vmconf};
# local volumes which have been copied
+ # and their old_id => new_id pairs
$self->{volumes} = [];
+ $self->{volume_map} = {};
- my $override_targetsid = $self->{opts}->{targetstorage};
-
+ my $storecfg = $self->{storecfg};
eval {
# found local volumes and their origin
$abort = 1;
};
- my @sids = PVE::Storage::storage_ids($self->{storecfg});
+ my @sids = PVE::Storage::storage_ids($storecfg);
foreach my $storeid (@sids) {
- my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
next if $scfg->{shared};
- next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);
+ next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);
# get list from PVE::Storage (for unused volumes)
- my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
+ my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid);
next if @{$dl->{$storeid}} == 0;
- my $targetsid = $override_targetsid // $storeid;
-
+ my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $storeid);
# check if storage is available on target node
- PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
+ PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});
+
+ # grandfather in existing mismatches: the content type is only checked
+ # when the volume actually moves to a different storage
+ if ($targetsid ne $storeid) {
+ my $target_scfg = PVE::Storage::storage_config($storecfg, $targetsid);
+ die "content type 'images' is not available on storage '$targetsid'\n"
+ if !$target_scfg->{content}->{images};
+ }
PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volinfo) = @_;
});
}
- my $replicatable_volumes = PVE::QemuConfig->get_replicatable_volumes($self->{storecfg}, $self->{vmid}, $conf, 0, 1);
+ my $rep_cfg = PVE::ReplicationConfig->new();
+ my $replication_jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node});
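+ # volumes only count as replicatable if a matching local replication job exists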
+ my $replicatable_volumes = !$replication_jobcfg ? {}
+ : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
my $test_volid = sub {
my ($volid, $attr) = @_;
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $targetsid = $override_targetsid // $sid;
+ my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
# check if storage is available on both nodes
- my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
+ my $scfg = PVE::Storage::storage_check_node($storecfg, $sid);
+ PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});
return if $scfg->{shared};
$local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
+ $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused};
+
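+ # flag vmstate volumes, their names must stay stable (see allow_rename below)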
+ $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;
if ($attr->{cdrom}) {
if ($volid =~ /vm-\d+-cloudinit/) {
die "local cdrom image\n";
}
- my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
+ my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
die "owned by other VM (owner = VM $owner)\n"
- if !$owner || ($owner != $self->{vmid});
+ if !$owner || ($owner != $vmid);
+
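+ # vmstate files need none of the snapshot or linked-clone checks below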
+ return if $attr->{is_vmstate};
if (defined($snaprefs)) {
$local_volumes->{$volid}->{snapshots} = 1;
}
die "referenced by linked clone(s)\n"
- if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
+ if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
};
PVE::QemuServer::foreach_volid($conf, sub {
# additional checks for local storage
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;
die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
if !$migratable;
# image is a linked clone on local storage, so we can't migrate.
- if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
+ if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
die "can't migrate '$volid' as it's a clone of '$basename'";
}
}
- my $rep_cfg = PVE::ReplicationConfig->new();
- if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
+ if ($replication_jobcfg) {
if ($self->{running}) {
my $version = PVE::QemuServer::kvm_user_version();
}
my $live_replicatable_volumes = {};
- PVE::QemuServer::foreach_drive($conf, sub {
+ PVE::QemuConfig->foreach_volume($conf, sub {
my ($ds, $drive) = @_;
my $volid = $drive->{file};
my $start_time = time();
my $logfunc = sub { $self->log('info', shift) };
$self->{replicated_volumes} = PVE::Replication::run_replication(
- 'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
+ 'PVE::QemuConfig', $replication_jobcfg, $start_time, $start_time, $logfunc);
}
# sizes in config have to be accurate for remote node to correctly
# allocate disks, rescan to be sure
- my $volid_hash = PVE::QemuServer::scan_volids($self->{storecfg}, $vmid);
- PVE::QemuServer::foreach_drive($conf, sub {
+ my $volid_hash = PVE::QemuServer::scan_volids($storecfg, $vmid);
+ PVE::QemuConfig->foreach_volume($conf, sub {
my ($key, $drive) = @_;
+ return if $key eq 'efidisk0'; # skip efidisk, will be handled later
+ my $volid = $drive->{file};
+ return if !defined($local_volumes->{$volid}); # only update sizes for local volumes
+
my ($updated, $old_size, $new_size) = PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
if (defined($updated)) {
$conf->{$key} = PVE::QemuServer::print_drive($updated);
}
});
+ # we want to set the efidisk size in the config to the size of the
+ # real OVMF_VARS.fd image, otherwise we might create an image that is
+ # too big, which does not work
+ if (defined($conf->{efidisk0})) {
+ PVE::QemuServer::update_efidisk_size($conf);
+ }
+
$self->log('info', "copying local disk images") if scalar(%$local_volumes);
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $targetsid = $override_targetsid // $sid;
+ my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
my $ref = $local_volumes->{$volid}->{ref};
if ($self->{running} && $ref eq 'config') {
push @{$self->{online_local_volumes}}, $volid;
next if $self->{replicated_volumes}->{$volid};
push @{$self->{volumes}}, $volid;
my $opts = $self->{opts};
- my $insecure = $opts->{migration_type} eq 'insecure';
- my $with_snapshots = $local_volumes->{$volid}->{snapshots};
# use 'migrate' limit for transfer to other node
my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$targetsid, $sid], $opts->{bwlimit});
# JSONSchema and get_bandwidth_limit use kbps - storage_migrate expects bps
$bwlimit = $bwlimit * 1024 if defined($bwlimit);
- PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
- undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
+ my $storage_migrate_opts = {
+ 'bwlimit' => $bwlimit,
+ 'insecure' => $opts->{migration_type} eq 'insecure',
+ 'with_snapshots' => $local_volumes->{$volid}->{snapshots},
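+ # vmstate volume names encode the snapshot they belong to, never rename them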
+ 'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
+ };
+
+ my $logfunc = sub { $self->log('info', $_[0]); };
+ my $new_volid = eval {
+ PVE::Storage::storage_migrate($storecfg, $volid, $self->{ssh_info},
+ $targetsid, $storage_migrate_opts, $logfunc);
+ };
+ if (my $err = $@) {
+ die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
+ }
+
+ $self->{volume_map}->{$volid} = $new_volid;
+ $self->log('info', "volume '$volid' is '$new_volid' on the target\n");
}
}
};
my ($self) = @_;
foreach my $target_drive (keys %{$self->{target_drive}}) {
+ my $drivestr = $self->{target_drive}->{$target_drive}->{drivestr};
+ next if !defined($drivestr);
+
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $drivestr);
+
# don't clean up replicated disks!
- next if defined($self->{target_drive}->{$target_drive}->{bitmap});
+ next if defined($self->{replicated_volumes}->{$drive->{file}});
- my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
sub cleanup_bitmaps {
my ($self) = @_;
- foreach my $drive (%{$self->{target_drive}}) {
+ foreach my $drive (keys %{$self->{target_drive}}) {
my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
next if !$bitmap;
$self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
push @$cmd, '--machine', $self->{forcemachine};
}
+ if ($self->{forcecpu}) {
+ push @$cmd, '--force-cpu', $self->{forcecpu};
+ }
+
if ($self->{online_local_volumes}) {
push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
}
my $spice_port;
- my $tunnel_addr = [];
- my $sock_addr = [];
+ my $unix_socket_info = {};
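+ # unix sockets the ssh tunnel has to forward, keyed to avoid duplicates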
# version > 0 for unix socket support
my $nbd_protocol_version = 1;
# TODO change to 'spice_ticket: <ticket>\n' in 7.0
my $input = $spice_ticket ? "$spice_ticket\n" : "\n";
$input .= "nbd_protocol_version: $nbd_protocol_version\n";
+ my $number_of_online_replicated_volumes = 0;
+
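+ # announce replicated volumes to the target, so it re-uses them instead of allocating new disks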
+ # prevent auto-vivification
+ if ($self->{online_local_volumes}) {
+ foreach my $volid (@{$self->{online_local_volumes}}) {
+ next if !$self->{replicated_volumes}->{$volid};
+ $number_of_online_replicated_volumes++;
+ $input .= "replicated_volume: $volid\n";
+ }
+ }
+
+ my $target_replicated_volumes = {};
+
# Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
# instead we pipe it through STDIN
my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;
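+ # an NBD server is now running on the target, remember to stop it when done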
+ $self->{stopnbd} = 1;
$self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
$self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
} elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;
+ $self->{stopnbd} = 1;
$self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
$self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
- push @$tunnel_addr, "$nbd_unix_addr:$nbd_unix_addr";
- push @$sock_addr, $nbd_unix_addr;
+ $unix_socket_info->{$nbd_unix_addr} = 1;
+ } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
+ my $drive = $1;
+ my $volid = $2;
+ $target_replicated_volumes->{$volid} = $drive;
} elsif ($line =~ m/^QEMU: (.*)$/) {
$self->log('info', "[$self->{node}] $1\n");
}
die "unable to detect remote migration address\n" if !$raddr;
+ if (scalar(keys %$target_replicated_volumes) != $number_of_online_replicated_volumes) {
+ die "number of replicated disks on source and target node do not match - target node too old?\n"
+ }
+
$self->log('info', "start remote tunnel");
if ($migration_type eq 'secure') {
if ($ruri =~ /^unix:/) {
- unlink $raddr;
- push @$tunnel_addr, "$raddr:$raddr";
- $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
- push @$sock_addr, $raddr;
+ my $ssh_forward_info = ["$raddr:$raddr"];
+ $unix_socket_info->{$raddr} = 1;
+
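+ # forward each collected socket over the tunnel, removing stale local socket files first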
+ my $unix_sockets = [ keys %$unix_socket_info ];
+ for my $sock (@$unix_sockets) {
+ push @$ssh_forward_info, "$sock:$sock";
+ unlink $sock;
+ }
+
+ $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);
my $unix_socket_try = 0; # wait for the socket to become ready
while ($unix_socket_try <= 100) {
$unix_socket_try++;
my $available = 0;
- foreach my $sock (@$sock_addr) {
+ foreach my $sock (@$unix_sockets) {
if (-S $sock) {
$available++;
}
}
- if ($available == @$sock_addr) {
+ if ($available == @$unix_sockets) {
last;
}
$self->finish_tunnel($self->{tunnel});
die "Timeout, migration socket $ruri did not get ready";
}
+ $self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);
} elsif ($ruri =~ /^tcp:/) {
- my $tunnel_addr;
+ my $ssh_forward_info = [];
if ($raddr eq "localhost") {
# for backwards compatibility with older qemu-server versions
my $pfamily = PVE::Tools::get_host_address_family($nodename);
my $lport = PVE::Tools::next_migrate_port($pfamily);
- $tunnel_addr = "$lport:localhost:$rport";
+ push @$ssh_forward_info, "$lport:localhost:$rport";
}
- $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
+ $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);
} else {
die "unsupported protocol in migration URI: $ruri\n";
# fork tunnel for insecure migration, to send faster commands like resume
$self->{tunnel} = $self->fork_tunnel();
}
- $self->{tunnel}->{sock_addr} = $sock_addr if (@$sock_addr);
-
my $start = time();
my $opt_bwlimit = $self->{opts}->{bwlimit};
my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
my $target_drive = PVE::QemuServer::parse_drive($drive, $target->{drivestr});
- my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_drive->{file});
- my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_drive->{file});
+ my $source_volid = $source_drive->{file};
+ my $target_volid = $target_drive->{file};
+
+ my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_volid);
+ my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_volid);
my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);
my $bitmap = $target->{bitmap};
$self->log('info', "$drive: start migration to $nbd_uri");
PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
+
+ $self->{volume_map}->{$source_volid} = $target_volid;
+ $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
}
}
my $migrate_downtime = $defaults->{migrate_downtime};
$migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
- if (defined($migrate_downtime)) {
- # migrate-set-parameters expects limit in ms
- $migrate_downtime *= 1000;
- $self->log('info', "migration downtime limit: $migrate_downtime ms");
- $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
- }
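+ # migrate_downtime always has a value here, the defaults supply one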
+ # migrate-set-parameters expects limit in ms
+ $migrate_downtime *= 1000;
+ $self->log('info', "migration downtime limit: $migrate_downtime ms");
+ $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
# set cachesize to 10% of the total memory
my $memory = $conf->{memory} || $defaults->{memory};
if ($downtimecounter > 5) {
$downtimecounter = 0;
$migrate_downtime *= 2;
- $self->log('info', "migrate_set_downtime: $migrate_downtime");
+ $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
eval {
- mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
+ # migrate-set-parameters does not touch values not
+ # specified, so this only changes downtime-limit
+ mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
};
- $self->log('info', "migrate_set_downtime error: $@") if $@;
+ $self->log('info', "migrate-set-parameters error: $@") if $@;
}
}
# cleanup resources on target host
if ($self->{storage_migration}) {
-
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
if (my $err = $@) {
$self->log('err', $err);
}
+ }
- eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
- if (my $err = $@) {
- $self->log('err', $err);
- }
- eval { $self->cleanup_bitmaps() };
- if (my $err =$@) {
- $self->log('err', $err);
- }
+ eval { $self->cleanup_bitmaps() };
+ if (my $err = $@) {
+ $self->log('err', $err);
}
my $nodename = PVE::INotify::nodename();
$self->{errors} = 1;
}
+ # cleanup after stopping, otherwise disks might be in-use by target VM!
+ eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ }
+
if ($self->{tunnel}) {
eval { finish_tunnel($self, $self->{tunnel}); };
if (my $err = $@) {
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
die "Failed to complete storage migration: $err\n";
- } else {
- foreach my $target_drive (keys %{$self->{target_drive}}) {
- my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
- $conf->{$target_drive} = PVE::QemuServer::print_drive($drive);
- PVE::QemuConfig->write_config($vmid, $conf);
- }
}
}
+ if ($self->{volume_map}) {
+ my $target_drives = $self->{target_drive};
+
+ # FIXME: for NBD storage migration we currently only update the volid, and
+ # not the full drivestr from the target node. Work around that until we
+ # have a real rescan, to avoid things like a wrong format in the drive
+ delete $conf->{$_} for keys %$target_drives;
+ PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});
+
+ for my $drive (keys %$target_drives) {
+ $conf->{$drive} = $target_drives->{$drive}->{drivestr};
+ }
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+
# transfer replication state before move config
$self->transfer_replication_state() if $self->{replicated_volumes};
$self->switch_replication_job_target() if $self->{replicated_volumes};
if ($self->{livemigration}) {
- if ($self->{storage_migration}) {
+ if ($self->{stopnbd}) {
+ $self->log('info', "stopping NBD storage migration server on target.");
# stop nbd server on remote vm - requirement for resume since 2.9
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];