X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FQemuMigrate.pm;h=65f39b6e778ff62977b656c355d594479084daa0;hb=7e7ec468a00937a335844b9105c244810c114e5b;hp=8c6fa03c3ab5f48f86d3b88126b5d31d535d0707;hpb=46dd42f70cb97e1041fd7cf0b9f55ed324268a58;p=qemu-server.git

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 8c6fa03..65f39b6 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -218,10 +218,10 @@ sub prepare {
         $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
 
     }
-
-    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
+    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
+    if (scalar @$loc_res) {
         if ($self->{running} || !$self->{opts}->{force}) {
-            die "can't migrate VM which uses local devices\n";
+            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
         } else {
             $self->log('info', "migrating VM which uses local devices");
         }
@@ -234,7 +234,7 @@ sub prepare {
         my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
 
         # check if storage is available on both nodes
-        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+        my $targetsid = $self->{opts}->{targetstorage} // $sid;
         my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
         PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
 
@@ -246,6 +246,7 @@ sub prepare {
                 if !$plugin->check_connection($sid, $scfg);
         } else {
             # only activate if not shared
+            next if ($volid =~ m/vm-\d+-cloudinit/);
             push @$need_activate, $volid;
         }
     }
@@ -269,7 +270,7 @@ sub sync_disks {
 
     # local volumes which have been copied
     $self->{volumes} = [];
 
-    my $res = [];
+    my $override_targetsid = $self->{opts}->{targetstorage};
 
     eval {
@@ -303,7 +304,7 @@ sub sync_disks {
 
             next if @{$dl->{$storeid}} == 0;
 
-            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;
+            my $targetsid = $override_targetsid // $storeid;
 
             # check if storage is available on target node
             PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -320,6 +321,7 @@ sub sync_disks {
         my ($volid, $attr) = @_;
 
         if ($volid =~ m|^/|) {
+            return if $attr->{shared};
             $local_volumes->{$volid}->{ref} = 'config';
             die "local file/device\n";
         }
@@ -341,7 +343,7 @@ sub sync_disks {
 
         my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
 
-        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+        my $targetsid = $override_targetsid // $sid;
         # check if storage is available on both nodes
         my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
         PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
@@ -352,7 +354,13 @@ sub sync_disks {
 
         $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
 
-        die "local cdrom image\n" if $attr->{cdrom};
+        if ($attr->{cdrom}) {
+            if ($volid =~ /vm-\d+-cloudinit/) {
+                $local_volumes->{$volid}->{ref} = 'generated';
+                return;
+            }
+            die "local cdrom image\n";
+        }
 
         my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
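
Note: the hunk above is where local cloudinit images get the new 'generated'
ref instead of aborting with "local cdrom image" - they can be regenerated on
the target, so they only need flagging, not copying. A minimal standalone
sketch of that classification (the volume IDs are made-up examples, not from
the patch):

    use strict;
    use warnings;

    # Cloudinit images match vm-<vmid>-cloudinit and are merely flagged;
    # any other local cdrom volume still blocks the migration.
    for my $volid ('local-lvm:vm-100-cloudinit', 'local:iso/debian.iso') {
        if ($volid =~ /vm-\d+-cloudinit/) {
            print "$volid => ref 'generated'\n";
        } else {
            print "$volid => would die: local cdrom image\n";
        }
    }
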
@@ -393,6 +401,8 @@ sub sync_disks {
                 $self->log('info', "found local disk '$vol' (in current VM config)\n");
             } elsif ($ref eq 'snapshot') {
                 $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
+            } elsif ($ref eq 'generated') {
+                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
             } else {
                 $self->log('info', "found local disk '$vol'\n");
             }
@@ -405,10 +415,6 @@ sub sync_disks {
             $self->log('warn', "$err");
         }
 
-        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
-            $self->{opts}->{targetstorage} = 1; #use same sid for remote local
-        }
-
         if ($abort) {
             die "can't migrate VM - check log\n";
         }
@@ -447,15 +453,28 @@ sub sync_disks {
         foreach my $volid (keys %$local_volumes) {
             my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
-            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid}->{ref} eq 'config') {
+            my $targetsid = $override_targetsid // $sid;
+            my $ref = $local_volumes->{$volid}->{ref};
+            if ($self->{running} && $ref eq 'config') {
                 push @{$self->{online_local_volumes}}, $volid;
+            } elsif ($ref eq 'generated') {
+                die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
+                # skip all generated volumes but queue them for deletion in phase3_cleanup
+                push @{$self->{volumes}}, $volid;
+                next;
             } else {
                 next if $rep_volumes->{$volid};
                 push @{$self->{volumes}}, $volid;
-                my $insecure = $self->{opts}->{migration_type} eq 'insecure';
+                my $opts = $self->{opts};
+                my $insecure = $opts->{migration_type} eq 'insecure';
                 my $with_snapshots = $local_volumes->{$volid}->{snapshots};
-                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
-                                              undef, undef, undef, undef, $insecure, $with_snapshots);
+                # use 'migrate' limit for transfer to other node
+                my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$targetsid, $sid], $opts->{bwlimit});
+                # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
+                $bwlimit = $bwlimit * 1024 if defined($bwlimit);
+
+                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
+                                              undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
             }
         }
     };
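
Note: as the in-diff comment says, get_bandwidth_limit() (and the JSONSchema
bwlimit option) work in KiB/s while storage_migrate() expects bytes/s, hence
the "* 1024" above. A worked example of that conversion (the 10240 figure is
illustrative, not from the patch):

    use strict;
    use warnings;

    # e.g. a 'migrate' bandwidth limit of 10240 KiB/s from datacenter.cfg
    my $bwlimit = 10240;

    # storage_migrate() takes bytes/s, so scale KiB/s -> B/s;
    # undef (no limit configured) must stay undef.
    $bwlimit = $bwlimit * 1024 if defined($bwlimit);

    print "$bwlimit\n";    # 10485760 B/s, i.e. 10 MiB/s
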
@@ -555,8 +574,8 @@ sub phase2 {
         push @$cmd, '--machine', $self->{forcemachine};
     }
 
-    if ($self->{opts}->{targetstorage}) {
-        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+    if ($self->{online_local_volumes}) {
+        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
     }
 
     my $spice_port;
@@ -581,10 +600,10 @@ sub phase2 {
             $rport = int($1);
             $ruri = "tcp:$raddr:$rport";
         }
-        elsif ($line =~ m/^spice listens on port (\d+)$/) { 
+        elsif ($line =~ m/^spice listens on port (\d+)$/) {
             $spice_port = int($1);
         }
-        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) { 
+        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
             my $volid = $4;
             my $nbd_uri = "nbd:$1:$2:exportname=$3";
             my $targetdrive = $3;
@@ -601,8 +620,9 @@ sub phase2 {
 
     die "unable to detect remote migration address\n" if !$raddr;
 
+    $self->log('info', "start remote tunnel");
+
     if ($migration_type eq 'secure') {
-        $self->log('info', "start remote tunnel");
 
         if ($ruri =~ /^unix:/) {
             unlink $raddr;
@@ -635,11 +655,16 @@ sub phase2 {
         } else {
             die "unsupported protocol in migration URI: $ruri\n";
         }
+    } else {
+        #fork tunnel for insecure migration, to send faster commands like resume
+        $self->{tunnel} = $self->fork_tunnel();
     }
 
     my $start = time();
-    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+
+    my $opt_bwlimit = $self->{opts}->{bwlimit};
+
+    if (defined($self->{online_local_volumes})) {
         $self->{storage_migration} = 1;
         $self->{storage_migration_jobs} = {};
         $self->log('info', "starting storage migration");
@@ -647,9 +672,14 @@ sub phase2 {
         die "The number of local disks does not match between the source and the destination.\n"
             if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
         foreach my $drive (keys %{$self->{target_drive}}){
-            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
-            $self->log('info', "$drive: start migration to to $nbd_uri");
-            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
+            my $target = $self->{target_drive}->{$drive};
+            my $nbd_uri = $target->{nbd_uri};
+            my $source_sid = PVE::Storage::Plugin::parse_volume_id($conf->{$drive});
+            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target->{volid});
+            my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$source_sid, $target_sid], $opt_bwlimit);
+
+            $self->log('info', "$drive: start migration to $nbd_uri");
+            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
         }
     }
@@ -659,11 +689,21 @@ sub phase2 {
     # load_defaults
     my $defaults = PVE::QemuServer::load_defaults();
 
-    # always set migrate speed (overwrite kvm default of 32m)
-    # we set a very hight default of 8192m which is basically unlimited
-    my $migrate_speed = $defaults->{migrate_speed} || 8192;
-    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
-    $migrate_speed = $migrate_speed * 1048576;
+    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
+    # migrate_speed parameter in qm.conf - take the lower of the two.
+    my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', undef, $opt_bwlimit) // 0;
+    my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
+    # migrate_speed is in MB/s, bwlimit in KB/s
+    $migrate_speed *= 1024;
+
+    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
+
+    # always set migrate speed (overwrite kvm default of 32m) we set a very high
+    # default of 8192m which is basically unlimited
+    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;
+
+    # qmp takes migrate_speed in B/s.
+    $migrate_speed *= 1024;
     $self->log('info', "migrate_set_speed: $migrate_speed");
     eval {
         PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
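
Note: three different units meet in the last hunk above - migrate_speed from
qm.conf is in MB/s, bwlimit is in KB/s, and QMP's migrate_set_speed takes
bytes/s - and the lower of the two configured limits wins. A worked example
of that arithmetic with illustrative values (a 51200 KB/s datacenter limit
vs. a 100 MB/s per-VM setting), mirroring the patch's logic:

    use strict;
    use warnings;

    my $bwlimit = 51200;                     # KB/s, e.g. from datacenter.cfg
    my $conf_speed = 100;                    # MB/s, e.g. qm.conf migrate_speed

    my $migrate_speed = $conf_speed // $bwlimit;
    $migrate_speed *= 1024;                  # MB/s -> KB/s
    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
    $migrate_speed ||= 8192 * 1024;          # fallback default, ~unlimited
    $migrate_speed *= 1024;                  # KB/s -> B/s

    print "$migrate_speed\n";    # 52428800 B/s: the stricter 50 MiB/s limit
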
@@ -689,6 +729,8 @@ sub phase2 {
     # set cachesize to 10% of the total memory
     my $memory = $conf->{memory} || $defaults->{memory};
     my $cachesize = int($memory * 1048576 / 10);
+    $cachesize = round_powerof2($cachesize);
+
     $self->log('info', "set cachesize: $cachesize");
     eval {
         PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
@@ -702,13 +744,13 @@ sub phase2 {
         my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
 
         my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
-        my $subject = PVE::AccessControl::read_x509_subject_spice($filename); 
+        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
 
         $self->log('info', "spice client_migrate_info");
 
         eval {
-            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice', 
-                                                hostname => $proxyticket, 'tls-port' => $spice_port,
+            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
+                                                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                                                 'cert-subject' => $subject);
         };
         $self->log('info', "client_migrate_info error: $@") if $@;
@@ -813,7 +855,7 @@ sub phase2 {
 
             $lstat = $stat->{ram}->{transferred};
-            
+
         } else {
             die $merr if $merr;
             die "unable to parse migration status '$stat->{status}' - aborting\n";
         }
@@ -857,7 +899,7 @@ sub phase2_cleanup {
     }
 
     my $nodename = PVE::INotify::nodename();
- 
+
     my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
     eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
     if (my $err = $@) {
@@ -961,6 +1003,11 @@ sub phase3_cleanup {
             $self->{errors} = 1;
         }
     }
+
+    if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
+        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
+        eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+    }
 }
 
 # close tunnel on successful migration, on error phase2_cleanup closed it
@@ -982,7 +1029,7 @@ sub phase3_cleanup {
             last if $timer > 50;
             $timer ++;
             usleep(200000);
-         }
+        }
     }
 };
@@ -1029,4 +1076,9 @@ sub final_cleanup {
     # nothing to do
 }
 
+sub round_powerof2 {
+    return 1 if $_[0] < 2;
+    return 2 << int(log($_[0]-1)/log(2));
+}
+
 1;
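
Note: round_powerof2() (added at the bottom) rounds the cache size up to the
next power of two - 10% of the VM memory generally is not one, and QEMU
expects a power of two for the migration (xbzrle) cache size. A quick
standalone check of the helper; the memory sizes are illustrative:

    use strict;
    use warnings;

    sub round_powerof2 {
        return 1 if $_[0] < 2;
        return 2 << int(log($_[0]-1)/log(2));
    }

    for my $memory (1024, 2048, 4096) {                # VM memory in MiB
        my $cachesize = int($memory * 1048576 / 10);   # 10% of memory, bytes
        printf "%4d MiB -> cachesize %d\n", $memory, round_powerof2($cachesize);
    }
    # 1024 MiB -> cachesize 134217728   (128 MiB)
    # 2048 MiB -> cachesize 268435456   (256 MiB)
    # 4096 MiB -> cachesize 536870912   (512 MiB)
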