fix reverting for non-existing configs

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 55c9a7c652df61833951f0612815be111a3efe47..65f39b6e778ff62977b656c355d594479084daa0 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -218,10 +218,10 @@ sub prepare {
        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
 
     }
-
-    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
+    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
+    if (scalar @$loc_res) {
        if ($self->{running} || !$self->{opts}->{force}) {
-           die "can't migrate VM which uses local devices\n";
+           die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
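For context, a plausible reconstruction (assumed, not the verbatim implementation) of the contract this hunk now relies on: PVE::QemuServer::check_local_resources() returns an arrayref of offending config keys instead of a boolean, so the caller can name the local devices in its error message:

    sub check_local_resources {
        my ($conf, $noerr) = @_;

        my @loc_res = ();
        # collect every config key that binds the VM to host-local hardware
        foreach my $k (keys %$conf) {
            push @loc_res, $k if $k =~ m/^(usb|hostpci|serial|parallel)\d+$/;
        }

        die "VM uses local resources\n" if scalar @loc_res && !$noerr;
        return \@loc_res;    # always an arrayref; empty means migratable
    }

Note that an empty arrayref is still true in boolean context, which is why the caller above tests scalar @$loc_res rather than the reference itself, as the old "if (my $loc_res = ...)" pattern would now always take the branch.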
@@ -458,6 +458,7 @@ sub sync_disks {
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } elsif ($ref eq 'generated') {
+               die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
                # skip all generated volumes but queue them for deletion in phase3_cleanup
                push @{$self->{volumes}}, $volid;
                next;
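For context, a sketch of how a volume ends up classified as 'generated' earlier in sync_disks; $attr and $volhash are hypothetical locals here, and the regex is an assumption about how cloudinit images are recognized:

    # a local cloudinit image attached as a cdrom is regenerated from the
    # VM config on the target node, so it is never copied for a live
    # migration, only queued for deletion on the source afterwards
    if ($attr->{cdrom}) {
        if ($volid =~ m/vm-\d+-cloudinit/) {
            $volhash->{$volid}->{ref} = 'generated';
        } else {
            die "can't migrate local cdrom drive\n";
        }
    }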
@@ -661,6 +662,8 @@ sub phase2 {
 
     my $start = time();
 
+    my $opt_bwlimit = $self->{opts}->{bwlimit};
+
     if (defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
@@ -669,9 +672,14 @@ sub phase2 {
        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}){
-           my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
+           my $target = $self->{target_drive}->{$drive};
+           my $nbd_uri = $target->{nbd_uri};
+           my $source_sid = PVE::Storage::Plugin::parse_volume_id($conf->{$drive});
+           my $target_sid = PVE::Storage::Plugin::parse_volume_id($target->{volid});
+           my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$source_sid, $target_sid], $opt_bwlimit);
+
            $self->log('info', "$drive: start migration to $nbd_uri");
-           PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
+           PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
        }
     }
 
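For reference, a usage sketch of the limit resolution introduced above. The get_bandwidth_limit() call and its arguments are taken from the hunk itself; the KiB/s unit and the bytes/s conversion inside qemu_drive_mirror() are assumptions about the helpers, not part of this diff:

    # resolve the effective 'migrate' limit across source and target
    # storage; the per-call override $opt_bwlimit wins only where permitted
    my $bwlimit = PVE::Storage::get_bandwidth_limit(
        'migrate', [$source_sid, $target_sid], $opt_bwlimit);

    # plausible forwarding inside qemu_drive_mirror(): QMP drive-mirror
    # expects its 'speed' argument in bytes per second
    my %mirror_opts = (device => "drive-$drive", sync => 'full');
    $mirror_opts{speed} = $bwlimit * 1024 if $bwlimit;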
@@ -741,8 +749,8 @@ sub phase2 {
        $self->log('info', "spice client_migrate_info");
 
        eval {
-           PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice', 
-                                               hostname => $proxyticket, 'tls-port' => $spice_port, 
+           PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
+                                               hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                                                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
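A sketch of the resulting monitor call with hypothetical values; the rationale, that some QEMU/SPICE client combinations mishandle a client_migrate_info without a plaintext port, is an assumption, not stated in the diff:

    # with no plaintext SPICE listener on the target, an explicit port of 0
    # signals "TLS only" instead of leaving the key out entirely
    PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info",
        protocol       => 'spice',
        hostname       => $proxyticket,
        'port'         => 0,
        'tls-port'     => $spice_port,   # e.g. 61000
        'cert-subject' => $subject);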
@@ -847,7 +855,7 @@ sub phase2 {
 
 
            $lstat = $stat->{ram}->{transferred};
-           
+
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
@@ -891,7 +899,7 @@ sub phase2_cleanup {
     }
 
     my $nodename = PVE::INotify::nodename();
-    
+
     my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
     eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
     if (my $err = $@) {