package PVE::QemuMigrate;

use strict;
use warnings;

use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::ReplicationTools;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
use PVE::AccessControl;

use base qw(PVE::AbstractMigrate);
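
# This module implements QEMU/KVM guest migration for Proxmox VE. It plugs into
# the PVE::AbstractMigrate state machine, which calls prepare() first and then
# the phase handlers (phase2 only for online migration of a running VM):
#
#   prepare        - sanity checks, storage and ssh availability
#   phase1         - lock the config and copy offline local disks
#   phase2         - start the paused VM on the target and run the live migration
#   phase3         - remove local disk copies
#   phase3_cleanup - move the config, resume the VM on the target, stop it locally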
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
    }

    return { writer => $writer, reader => $reader, pid => $cpid };
}
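
# finish_command_pipe() is the counterpart to fork_command_pipe(): it closes the
# pipe handles, waits up to $timeout seconds for the child to exit on its own,
# and then escalates to SIGTERM and finally SIGKILL before giving up.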
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}
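
# fork_tunnel() starts 'qm mtunnel' on the target node over ssh. For secure
# migrations an optional '-L' forward (TCP port or UNIX socket path) is added so
# the local QEMU can reach the target's migration endpoint through the ssh
# connection. The helper waits for the "tunnel online" handshake before returning.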
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # .. and just to be sure check on remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
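
# prepare() runs before any state is touched: it loads and lock-checks the VM
# config, refuses to migrate a running VM without --online, verifies that every
# referenced storage is available on both source and target node, activates
# non-shared volumes and finally tests the ssh connection to the target.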
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        # keep the current machine type (including a possible .pxe variant) so
        # the target starts the VM with the same one
        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }
    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    eval {

        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;
        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };
        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid} = 'storage';
            });
        }
        my $test_volid = sub {
            my ($volid, $is_cdrom, $snapname) = @_;

            if ($volid =~ m|^/|) {
                $local_volumes->{$volid} = 'config';
                die "local file/device\n";
            }

            if ($volid eq 'cdrom') {
                my $msg = "can't migrate local cdrom drive";
                $msg .= " (referenced in snapshot '$snapname')"
                    if defined($snapname);
                &$log_error("$msg\n");
                return;
            }

            return if $volid eq 'none';

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $local_volumes->{$volid} = defined($snapname) ? 'snapshot' : 'config';

            die "local cdrom image\n" if $is_cdrom;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snapname)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };
        my $test_drive = sub {
            my ($ds, $drive, $snapname) = @_;

            eval {
                &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
            };
            &$log_error($@, $drive->{file}) if $@;
        };

        foreach my $snapname (keys %{$conf->{snapshots}}) {
            eval {
                &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
                    if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
            };
            &$log_error($@, $conf->{snapshots}->{$snapname}->{'vmstate'}) if $@;

            PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
        }

        PVE::QemuServer::foreach_drive($conf, $test_drive);
        foreach my $vol (sort keys %$local_volumes) {
            if ($local_volumes->{$vol} eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($local_volumes->{$vol} eq 'config') {
                die "can't live migrate attached local disks without with-local-disks option\n"
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($local_volumes->{$vol} eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use same sid for remote local
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }
        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }
        $self->log('info', "copying disk images");

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                push @{$self->{volumes}}, $volid;
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {

        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
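
# phase1: set the 'migrate' lock in the config file and copy all offline local
# disks; if the VM uses replication, the replica target is adjusted and the old
# replication job removed.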
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);

    # set new replica_target if we migrate to replica target.
    if ($conf->{replica}) {
        $self->log('info', "change replica target to Node: $self->{opts}->{node}");
        if ($conf->{replica_target} eq $self->{node}) {
            $conf->{replica_target} = $self->{opts}->{node};
        }

        PVE::ReplicationTools::job_remove($vmid);
        PVE::QemuConfig->write_config($vmid, $conf);
    }
}
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
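
# phase2 (online migration only): start the VM paused on the target via
# 'qm start --migratedfrom', parse the migration/NBD endpoints it reports on
# stdout, set up the ssh tunnel for secure migrations, optionally mirror local
# disks over NBD and then drive the actual QEMU 'migrate' command while polling
# 'query-migrate' until it completes.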
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
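
    # The migration type is resolved below with the following precedence:
    # explicit option > datacenter.cfg 'migration' setting > default 'secure'.
    # 'insecure' announces a TCP state URI, 'secure' a UNIX socket that is later
    # forwarded through the ssh tunnel.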
    # we use TCP only for insecure migrations, as ssh TCP forward tunnels often
    # appeared too late (they are hard, if not impossible, to check for)
    # secure migrations use UNIX sockets now; this *breaks* compatibility when trying
    # to migrate from new to old but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');

    my $migration_type = 'secure';
    if (defined($self->{opts}->{migration_type})) {
        $migration_type = $self->{opts}->{migration_type};
    } elsif (defined($datacenterconf->{migration}->{type})) {
        $migration_type = $datacenterconf->{migration}->{type};
    }

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }
    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }
    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter);
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });
568 die "unable to detect remote migration address\n" if !$raddr;
570 if ($migration_type eq 'secure') {
571 $self->log('info', "start remote tunnel");
573 if ($ruri =~ /^unix:/) {
575 $self->{tunnel
} = $self->fork_tunnel("$raddr:$raddr");
576 $self->{tunnel
}->{sock_addr
} = $raddr;
578 my $unix_socket_try = 0; # wait for the socket to become ready
579 while (! -S
$raddr) {
581 if ($unix_socket_try > 100) {
583 $self->finish_tunnel($self->{tunnel
});
584 die "Timeout, migration socket $ruri did not get ready";
590 } elsif ($ruri =~ /^tcp:/) {
592 if ($raddr eq "localhost") {
593 # for backwards compatibility with older qemu-server versions
594 my $pfamily = PVE
::Tools
::get_host_address_family
($nodename);
595 my $lport = PVE
::Tools
::next_migrate_port
($pfamily);
596 $tunnel_addr = "$lport:localhost:$rport";
599 $self->{tunnel
} = $self->fork_tunnel($tunnel_addr);
602 die "unsupported protocol in migration URI: $ruri\n";
    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}){
            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
        }
    }
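
    # Each entry in $self->{target_drive} corresponds to one NBD export announced
    # by the target node; qemu_drive_mirror() keeps the source VM running while
    # its local disks are mirrored there. The jobs collected in
    # $self->{storage_migration_jobs} are completed (or cancelled) in phase3_cleanup.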
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;
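
    # The speed limit above is configured in MB/s and converted to bytes/s for
    # the monitor command, e.g. the 8192 default becomes 8192 * 1048576 = 8589934592.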
    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }
    $self->log('info', "set migration_caps");
    PVE::QemuServer::set_migration_caps($vmid);

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;
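
    # $conf->{memory} is in MiB, so a VM with 4096 MiB of RAM gets an xbzrle cache
    # of int(4096 * 1048576 / 10) = 429496729 bytes (roughly 410 MiB).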
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }
    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
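
    # The loop below polls 'query-migrate' (initially every 2 seconds), logs RAM
    # transfer and xbzrle statistics, shortens the polling interval once the
    # remaining memory drops below the average transfer per iteration, and
    # doubles the allowed downtime whenever progress stalls for several iterations.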
    my $start = time();

    # always wait at least 2 seconds
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lstat = 0;
    my $lastrem;
    my $downtimecounter = 0;

    while (1) {
        $i++;
        my $avglstat = $lstat/$i if $lstat;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
    # just to be sure that the tunnel gets closed on successful migration, on error
    # phase2_cleanup closes it *after* stopping the remote waiting VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
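
# phase3: after a successful migration, remove the local copies of all volumes
# that were transferred; disks that are kept in sync by a replication job
# targeting the destination node are not removed.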
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    my $synced_volumes = PVE::ReplicationTools::get_syncable_guestdisks($self->{vmconf}, 'qemu')
        if $self->{vmconf}->{replica};

    # destroy local copies
    foreach my $volid (@$volids) {
        # do not destroy if new target is local_host
        next if $self->{vmconf}->{replica} &&
            defined($synced_volumes->{$volid}) &&
            $self->{vmconf}->{replica_target} eq $self->{opts}->{node};

        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
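
# phase3_cleanup: finish (or cancel) the drive-mirror jobs, move the config file
# to the target node, stop the NBD server and resume the VM there, then stop the
# source VM, deactivate its volumes and clear the migrate lock on the target.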
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    if ($self->{storage_migration}) {
        # finish block-jobs
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }
    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);
    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # remove drives referencing the nbd server from source
            # otherwise vm_stop might hang later on
            foreach my $drive (keys %{$self->{target_drive}}){
                PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "device_del", id => $drive);
            }

            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
    # wait for the spice client to follow the migrated server
    my $timer = 0;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
        $self->log('info', "Waiting for spice server migration");
        while (1) {
            my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
            last if int($res->{'migrated'}) == 1;
            last if $timer > 50;
            $timer++;
            usleep(200000);
        }
    }
    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }
    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");

    if ($self->{vmconf}->{replica}) {
        my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'set', $vmid, '--replica'];
        $self->cmd_logerr($cmd, errmsg => "failed to activate replica");
    }
}
sub final_cleanup {
    my ($self, $vmid) = @_;