package PVE::QemuMigrate;

# Live/offline migration of QEMU/KVM virtual machines between cluster nodes,
# implemented as the PVE::AbstractMigrate phases (prepare, phase1-3 + cleanup).

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use Time::HiRes qw( usleep );

use PVE::AccessControl;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Replication;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Storage;
use PVE::Tools;

use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);

use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors - open2 may fail in the forked child
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
    }

    die "can't fork command pipe\n" if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

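# Tear down a command pipe created by fork_command_pipe(): close both pipe
# ends, then try to collect the child, escalating from a plain waitpid poll
# to SIGTERM and finally SIGKILL if it does not exit in time.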
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output;

    return $output;
}

sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}

sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = ();
    foreach my $addr (@{$tunnel_addr // []}) {
        push @localtunnelinfo, '-L', $addr;
    }

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on local host
        my $cmd = ['rm', '-f', @{$tunnel->{sock_addr}}];
        PVE::Tools::run_command($cmd);

        # .. and just to be sure check on remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

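# Phase 0: sanity checks before any work is done - VM lock, online
# requirement, local resources, storage availability on both nodes and a
# reachability test of the SSH connection to the target node.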
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);

        # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
        # Since the parameter itself contains no reference to a custom model,
        # this makes migration independent of changes to "cpu-models.conf".
        if (defined($conf->{cpu})) {
            my $cpuconf = PVE::QemuServer::CPUConfig::parse_cpu_conf_basic($conf->{cpu});
            if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
                $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
            }
        }
    }

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
    if (scalar @$loc_res) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            next if $volid =~ m/vm-\d+-cloudinit/;
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    # and their old_id => new_id pairs
    $self->{volumes} = [];
    $self->{volume_map} = {};

    my $storecfg = $self->{storecfg};
    eval {
        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

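        # first, scan all enabled, non-shared storages for volumes belonging
        # to this VM - this also catches volumes that are not referenced in
        # the current config (e.g. unused or orphaned disks)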
        my @sids = PVE::Storage::storage_ids($storecfg);
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $storeid);
            # check if storage is available on target node
            PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});

            # grandfather in existing mismatches
            if ($targetsid ne $storeid) {
                my $target_scfg = PVE::Storage::storage_config($storecfg, $targetsid);
                die "content type 'images' is not available on storage '$targetsid'\n"
                    if !$target_scfg->{content}->{images};
            }

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volinfo) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';

                # If with_snapshots is not set for storage migrate, it tries to use
                # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
                # back to qcow2 is currently not possible.
                $local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
                $local_volumes->{$volid}->{format} = $volinfo->{format};
            });
        }

        my $rep_cfg = PVE::ReplicationConfig->new();
        my $replication_jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node});
        my $replicatable_volumes = !$replication_jobcfg ? {}
            : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);

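        # classify each volume referenced by the config or a snapshot; dies
        # for volumes that cannot be migrated at all, otherwise records the
        # origin ('config', 'snapshot', 'storage' or 'generated')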
        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($storecfg, $sid);
            PVE::Storage::storage_check_node($storecfg, $targetsid, $self->{node});

            return if $scfg->{shared};

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
            $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused};

            $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $vmid);

            return if $attr->{is_vmstate};

            if (defined($snaprefs)) {
                $local_volumes->{$volid}->{snapshots} = 1;

                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $local_volumes->{$volid}->{format} eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found $type disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found $type disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found $type disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($storecfg, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        if ($replication_jobcfg) {
            if ($self->{running}) {

                my $version = PVE::QemuServer::kvm_user_version();
                if (!min_version($version, 4, 2)) {
                    die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n";
                }

                my $live_replicatable_volumes = {};
                PVE::QemuConfig->foreach_volume($conf, sub {
                    my ($ds, $drive) = @_;

                    my $volid = $drive->{file};
                    $live_replicatable_volumes->{$ds} = $volid
                        if defined($replicatable_volumes->{$volid});
                });
                foreach my $drive (keys %$live_replicatable_volumes) {
                    my $volid = $live_replicatable_volumes->{$drive};

                    my $bitmap = "repl_$drive";

                    # start tracking before replication to get full delta + a few duplicates
                    $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
                    mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);

                    # other info comes from target node in phase 2
                    $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
                }
            }
            $self->log('info', "replicating disk images");

            my $start_time = time();
            my $logfunc = sub { $self->log('info', shift) };
            $self->{replicated_volumes} = PVE::Replication::run_replication(
                'PVE::QemuConfig', $replication_jobcfg, $start_time, $start_time, $logfunc);
        }

        # sizes in config have to be accurate for remote node to correctly
        # allocate disks, rescan to be sure
        my $volid_hash = PVE::QemuServer::scan_volids($storecfg, $vmid);
        PVE::QemuConfig->foreach_volume($conf, sub {
            my ($key, $drive) = @_;
            my ($updated, $old_size, $new_size) =
                PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
            if (defined($updated)) {
                $conf->{$key} = PVE::QemuServer::print_drive($updated);
                $self->log('info', "size of disk '$updated->{file}' ($key) updated from $old_size to $new_size\n");
            }
        });

        # we want to set the efidisk size in the config to the size of the
        # real OVMF_VARS.fd image, else we can create a too big image, which does not work
        if (defined($conf->{efidisk0})) {
            PVE::QemuServer::update_efidisk_size($conf);
        }

        $self->log('info', "copying local disk images") if scalar(%$local_volumes);

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $targetsid = PVE::QemuServer::map_storage($self->{opts}->{storagemap}, $sid);
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } elsif ($ref eq 'generated') {
                die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
                # skip all generated volumes but queue them for deletion in phase3_cleanup
                push @{$self->{volumes}}, $volid;
            } else {
                next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
                my $opts = $self->{opts};
                # use 'migrate' limit for transfer to other node
                my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$targetsid, $sid], $opts->{bwlimit});
                # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
                $bwlimit = $bwlimit * 1024 if defined($bwlimit);

                my $storage_migrate_opts = {
                    'bwlimit' => $bwlimit,
                    'insecure' => $opts->{migration_type} eq 'insecure',
                    'with_snapshots' => $local_volumes->{$volid}->{snapshots},
                    'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
                };

                my $logfunc = sub { $self->log('info', $_[0]); };
                my $new_volid = eval {
                    PVE::Storage::storage_migrate($storecfg, $volid, $self->{ssh_info},
                        $targetsid, $storage_migrate_opts, $logfunc);
                };
                if (my $err = $@) {
                    die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
                }

                $self->{volume_map}->{$volid} = $new_volid;
                $self->log('info', "volume '$volid' is '$new_volid' on the target\n");
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        my $drivestr = $self->{target_drive}->{$target_drive}->{drivestr};
        next if !defined($drivestr);

        my $drive = PVE::QemuServer::parse_drive($target_drive, $drivestr);

        # don't clean up replicated disks!
        next if defined($self->{replicated_volumes}->{$drive->{file}});

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub cleanup_bitmaps {
    my ($self) = @_;

    foreach my $drive (keys %{$self->{target_drive}}) {
        my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
        next if !$bitmap;
        $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
        mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
    }
}

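# Phase 1: mark the VM config with the 'migrate' lock and copy/replicate all
# local disk images to the target node while the VM (if running) keeps using
# the local copies.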
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);

    # sync_disks fixes disk sizes to match their actual size, write changes so
    # target allocates correct volumes
    PVE::QemuConfig->write_config($vmid, $conf);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }
}

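# Phase 2: start the VM on the target in 'incoming migration' mode, set up
# the (optionally SSH-tunneled) migration channel plus NBD channels for
# storage migration, then drive the QEMU memory migration to completion.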
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{forcecpu}) {
        push @$cmd, '--force-cpu', $self->{forcecpu};
    }

    if ($self->{online_local_volumes}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;
    my $tunnel_addr = [];
    my $sock_addr = [];
    # version > 0 for unix socket support
    my $nbd_protocol_version = 1;
    # TODO change to 'spice_ticket: <ticket>\n' in 7.0
    my $input = $spice_ticket ? "$spice_ticket\n" : "\n";
    $input .= "nbd_protocol_version: $nbd_protocol_version\n";

    my $number_of_online_replicated_volumes = 0;
    foreach my $volid (@{$self->{online_local_volumes}}) {
        next if !$self->{replicated_volumes}->{$volid};
        $number_of_online_replicated_volumes++;
        $input .= "replicated_volume: $volid\n";
    }

    my $target_replicated_volumes = {};

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
            my $drivestr = $4;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            my $nbd_unix_addr = $1;
            my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
            push @$tunnel_addr, "$nbd_unix_addr:$nbd_unix_addr";
            push @$sock_addr, $nbd_unix_addr;
        } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
            my $drive = $1;
            my $volid = $2;
            $target_replicated_volumes->{$volid} = $drive;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$raddr;

    if (scalar(keys %$target_replicated_volumes) != $number_of_online_replicated_volumes) {
        die "number of replicated disks on source and target node do not match - target node too old?\n";
    }

    $self->log('info', "start remote tunnel");

    if ($migration_type eq 'secure') {

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            push @$tunnel_addr, "$raddr:$raddr";
            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
            push @$sock_addr, $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while ($unix_socket_try <= 100) {
                $unix_socket_try++;
                my $available = 0;
                foreach my $sock (@$sock_addr) {
                    $available++ if -S $sock;
                }

                if ($available == @$sock_addr) {
                    last;
                }

                usleep(50000);
            }
            if ($unix_socket_try > 100) {
                $self->{errors} = 1;
                $self->finish_tunnel($self->{tunnel});
                die "Timeout, migration socket $ruri did not get ready";
            }

        } elsif ($ruri =~ /^tcp:/) {
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                push @$tunnel_addr, "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    } else {
        #fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
    $self->{tunnel}->{sock_addr} = $sock_addr if (@$sock_addr);

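    # for storage migration, every online local disk is now mirrored to its
    # NBD export on the target node; replicated volumes reuse the dirty
    # bitmap created in sync_disks() so only writes since the last
    # replication run need to be transferred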
    my $start = time();

    my $opt_bwlimit = $self->{opts}->{bwlimit};

    if (defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $target_drive = PVE::QemuServer::parse_drive($drive, $target->{drivestr});

            my $source_volid = $source_drive->{file};
            my $target_volid = $target_drive->{file};

            my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_volid);
            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_volid);

            my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);
            my $bitmap = $target->{bitmap};

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);

            $self->{volume_map}->{$source_volid} = $target_volid;
            $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
        }
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', undef, $opt_bwlimit) // 0;
    my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
    # migrate_speed is in MB/s, bwlimit in KB/s
    $migrate_speed *= 1024;

    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;

    # always set migrate speed (overwrite kvm default of 32m) we set a very high
    # default of 8192m which is basically unlimited
    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;

    # qmp takes migrate_speed in B/s.
    $migrate_speed *= 1024;
    $self->log('info', "migration speed limit: $migrate_speed B/s");
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    # migrate-set-parameters expects limit in ms
    $migrate_downtime *= 1000;
    $self->log('info', "migration downtime limit: $migrate_downtime ms");
    $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "migration cachesize: $cachesize B");
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

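    # for SPICE, pass the target node's connection details and certificate
    # subject to the client so the session can seamlessly reconnect after
    # the migration completed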
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        mon_cmd($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

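    # poll 'query-migrate' until QEMU reports completed/failed/cancelled;
    # log transfer statistics on the way and auto-increase the downtime
    # limit if the remaining RAM stops converging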
    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;

    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);

        my $stat;
        eval {
            $stat = mon_cmd($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $memory / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer speed
                $usleep = 100000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");

                    # migrate-set-parameters does not touch values not
                    # specified, so this only changes downtime-limit
                    eval {
                        mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
                    };
                    $self->log('info', "migrate-set-parameters error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { $self->cleanup_bitmaps() };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # cleanup after stopping, otherwise disks might be in-use by target VM!
    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

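# Phase 3: migration finished - remove the local copies of all volumes that
# were moved to the target node.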
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

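# Phase 3 cleanup: finalize the block jobs, rewrite the config with the new
# volume IDs, hand the config file over to the target node, resume the VM
# there and stop/clean up the local instance.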
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block-job with block-job-cancel, to disconnect source VM from NBD
        # to avoid it trying to re-establish it. We are in blockjob ready state,
        # thus this command changes it to blockjob complete (see qapi docs)
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration: $err\n";
        }
    }

    if ($self->{volume_map}) {
        my $target_drives = $self->{target_drive};

        # FIXME: for NBD storage migration we now only update the volid, and
        # not the full drivestr from the target node. Workaround that until we
        # got some real rescan, to avoid things like wrong format in the drive
        delete $conf->{$_} for keys %$target_drives;
        PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});

        for my $drive (keys %$target_drives) {
            $conf->{$drive} = $target_drives->{$drive}->{drivestr};
        }
        PVE::QemuConfig->write_config($vmid, $conf);
    }

    # transfer replication state before move config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
            eval {
                $self->write_tunnel($tunnel, 30, "resume $vmid");
            };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        } else {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
            my $logf = sub {
                my $line = shift;
                $self->log('err', $line);
            };
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel) {
        eval { finish_tunnel($self, $tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            # keep replicated volumes!
            next if $self->{replicated_volumes}->{$volid};

            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

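# Round up to the next power of two - QEMU expects a power-of-two value for
# the xbzrle cache size. E.g. a 1.5 GiB request (1610612736) is rounded up
# to 2 GiB (2147483648); exact powers of two are returned unchanged.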
sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0] - 1) / log(2));
}

1;