package PVE::QemuMigrate;

use strict;
use warnings;

use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Replication;

use base qw(PVE::AbstractMigrate);
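# The helpers below manage the ssh control connection ('qm mtunnel' on the
# target node) that is used to exchange simple text commands (e.g. 'resume',
# 'quit') with the remote side during migration.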
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die "can't fork command pipe: $err" if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
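# Collect the tunnel child process: wait up to $timeout seconds for it to exit
# on its own, then escalate to SIGTERM and finally SIGKILL before giving up.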
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}
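# read_tunnel/write_tunnel exchange single text lines with the remote
# 'qm mtunnel' process; with tunnel protocol version >= 1 every command is
# expected to be acknowledged with an 'OK' reply.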
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output;

    return $output;
}
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}
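# Spawn the ssh tunnel to the target node ('qm mtunnel'), optionally forwarding
# a local port or socket via -L, and verify the 'tunnel online' handshake and
# the advertised tunnel protocol version.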
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }

    return $tunnel;
}
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # .. and just to be sure check on remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
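# prepare() runs on the source node before the actual migration: it checks the
# config lock, whether the VM is running (which requires --online), local
# device usage and that all referenced storages are available on both nodes,
# and activates the local volumes.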
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
    }

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
    if (scalar @$loc_res) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} // $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            next if ($volid =~ m/vm-\d+-cloudinit/);
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
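# sync_disks() collects all local (non-shared) volumes of the VM, decides which
# of them can be migrated, runs a final replication pass for replicated guests,
# and copies offline local disks to the target node via storage_migrate().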
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $override_targetsid = $self->{opts}->{targetstorage};

    eval {
        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $override_targetsid // $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $override_targetsid // $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
            $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
            if (defined($snaprefs)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        my $rep_cfg = PVE::ReplicationConfig->new();
        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            die "can't live migrate VM with replicated volumes\n" if $self->{running};
            $self->log('info', "replicating disk images");
            my $start_time = time();
            my $logfunc = sub { $self->log('info', shift) };
            $self->{replicated_volumes} = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
        }

        # sizes in config have to be accurate for remote node to correctly
        # allocate disks, rescan to be sure
        my $volid_hash = PVE::QemuServer::scan_volids($self->{storecfg}, $vmid);
        PVE::QemuServer::foreach_drive($conf, sub {
            my ($key, $drive) = @_;
            my ($updated, $old_size, $new_size) = PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
            if (defined($updated)) {
                $conf->{$key} = PVE::QemuServer::print_drive($updated);
                $self->log('info', "size of disk '$updated->{file}' ($key) updated from $old_size to $new_size\n");
            }
        });

        $self->log('info', "copying local disk images") if scalar(%$local_volumes);

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $targetsid = $override_targetsid // $sid;
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } elsif ($ref eq 'generated') {
                die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
                # skip all generated volumes but queue them for deletion in phase3_cleanup
                push @{$self->{volumes}}, $volid;
                next;
            } else {
                next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
                my $opts = $self->{opts};
                my $insecure = $opts->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                # use 'migrate' limit for transfer to other node
                my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$targetsid, $sid], $opts->{bwlimit});
                # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
                $bwlimit = $bwlimit * 1024 if defined($bwlimit);

                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
                    undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}
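# Remove disks that were already allocated on the target node ('pvesm free' via
# ssh) after a failed storage migration.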
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
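# phase1: set the 'migrate' lock in the VM config and copy local disks.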
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);

    # sync_disks fixes disk sizes to match their actual size, write changes so
    # target allocates correct volumes
    PVE::QemuConfig->write_config($vmid, $conf);
}
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
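# phase2: start the VM (and NBD server for local disks) on the target node, set
# up the migration tunnel, start storage and RAM migration, and poll
# 'query-migrate' until the migration completes or fails.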
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{online_local_volumes}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    my $exitcode = PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$raddr;

    $self->log('info', "start remote tunnel");

    if ($migration_type eq 'secure') {

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready";
                }

                usleep(50000);
            }

        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    } else {
        # fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
    my $start = time();

    my $opt_bwlimit = $self->{opts}->{bwlimit};

    if (defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $target_drive = PVE::QemuServer::parse_drive($drive, $target->{drivestr});

            my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_drive->{file});
            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_drive->{file});

            my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
        }
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', undef, $opt_bwlimit) // 0;
    my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
    # migrate_speed is in MB/s, bwlimit in KB/s
    $migrate_speed *= 1024;

    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;

    # always set migrate speed (overwrite kvm default of 32m) we set a very high
    # default of 8192m which is basically unlimited
    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;

    # qmp takes migrate_speed in B/s.
    $migrate_speed *= 1024;
    $self->log('info', "migration speed limit: $migrate_speed B/s");
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        # migrate-set-parameters expects limit in ms
        $migrate_downtime *= 1000;
        $self->log('info', "migration downtime limit: $migrate_downtime ms");
        $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
    }

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "migration cachesize: $cachesize B");
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        mon_cmd($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;

    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = mon_cmd($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $memory / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;

                # reduce sleep if remaining memory is lower than the average transfer speed
                $usleep = 100000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if ($xbzrlecachesize) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}
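# phase2_cleanup: undo phase2 after an error - cancel the migration and block
# jobs, free disks already allocated on the target, stop the remote VM and
# close the tunnel.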
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
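# phase3: migration succeeded - remove the local copies of the migrated volumes.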
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
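# phase3_cleanup: finalize a successful migration - commit mirrored drives to
# the config, move the config file to the target node, resume the VM there,
# stop the local VM and deactivate/free its local volumes.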
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block jobs on the source side
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration: $err\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # transfer replication state before move config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
            eval {
                $self->write_tunnel($tunnel, 30, "resume $vmid");
            };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        } else {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
            my $logf = sub {
                my $line = shift;
                $self->log('err', $line);
            };
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel) {
        eval { finish_tunnel($self, $tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
sub final_cleanup {
    my ($self, $vmid) = @_;
    # nothing to do
}

sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0] - 1) / log(2));
}

1;