package PVE::QemuMigrate;

use Time::HiRes qw( usleep );

use PVE::Format qw(render_bytes);
use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::StorageTunnel;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer::Memory qw(get_current_memory);
use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

# compared against remote end's minimum version
our $WS_TUNNEL_VERSION = 2;
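# Compatibility sketch (my reading of the checks in prepare() below): the remote
# end reports its tunnel 'version' and 'age' (presumably how many older versions
# it still supports), so migration is only allowed when
#   $tunnel->{version} - $tunnel->{age} <= $WS_TUNNEL_VERSION <= $tunnel->{version}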
my ($self, $ssh_forward_info) = @_;

my $cmd = ['/usr/sbin/qm', 'mtunnel'];
my ($level, $msg) = @_;
$self->log($level, $msg);

return PVE::Tunnel::fork_ssh_tunnel($self->{rem_ssh}, $cmd, $ssh_forward_info, $log);

sub fork_websocket_tunnel {
my ($self, $storages, $bridges) = @_;

my $remote = $self->{opts}->{remote};
my $conn = $remote->{conn};

my ($level, $msg) = @_;
$self->log($level, $msg);

my $websocket_url = "https://$conn->{host}:$conn->{port}/api2/json/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnelwebsocket";
my $url = "/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnel";

url => $websocket_url,

my $storage_list = join(',', keys %$storages);
my $bridge_list = join(',', keys %$bridges);

storages => $storage_list,
bridges => $bridge_list,

return PVE::Tunnel::fork_websocket_tunnel($conn, $url, $req_params, $tunnel_params, $log);

# proto: unix (secure) or tcp (insecure/legacy compat)
# addr: IP or UNIX socket path
# port: optional TCP port
# unix_sockets: additional UNIX socket paths to forward
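# A minimal, purely illustrative example (assumed values) of the $tunnel_info
# hash for a secure migration over UNIX sockets:
#   {
#       proto        => 'unix',
#       addr         => '/run/qemu-server/100.migrate',
#       unix_sockets => { '/run/qemu-server/100_nbd.migrate' => 1 },
#   }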
sub start_remote_tunnel {
my ($self, $tunnel_info) = @_;

my $nodename = PVE::INotify::nodename();
my $migration_type = $self->{opts}->{migration_type};

if ($migration_type eq 'secure') {

if ($tunnel_info->{proto} eq 'unix') {
my $ssh_forward_info = [];

my $unix_sockets = [ keys %{$tunnel_info->{unix_sockets}} ];
push @$unix_sockets, $tunnel_info->{addr};
for my $sock (@$unix_sockets) {
push @$ssh_forward_info, "$sock:$sock";

$self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

my $unix_socket_try = 0; # wait for the socket to become ready
while ($unix_socket_try <= 100) {
foreach my $sock (@$unix_sockets) {
if ($available == @$unix_sockets) {
if ($unix_socket_try > 100) {
PVE::Tunnel::finish_tunnel($self->{tunnel});
die "Timeout, migration socket $tunnel_info->{addr} did not get ready";
$self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);

} elsif ($tunnel_info->{proto} eq 'tcp') {
my $ssh_forward_info = [];
if ($tunnel_info->{addr} eq "localhost") {
# for backwards compatibility with older qemu-server versions
my $pfamily = PVE::Tools::get_host_address_family($nodename);
my $lport = PVE::Tools::next_migrate_port($pfamily);
push @$ssh_forward_info, "$lport:localhost:$tunnel_info->{port}";

$self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

die "unsupported protocol in migration URI: $tunnel_info->{proto}\n";

# fork tunnel for insecure migration, to send faster commands like resume
$self->{tunnel} = $self->fork_tunnel();
my ($self, $vmid, $code, @param) = @_;

return PVE::QemuConfig->lock_config($vmid, $code, @param);

sub target_storage_check_available {
my ($self, $storecfg, $targetsid, $volid) = @_;

if (!$self->{opts}->{remote}) {
# check if storage is available on target node
my $target_scfg = PVE::Storage::storage_check_enabled(
my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
if !$target_scfg->{content}->{$vtype};

my ($self, $vmid) = @_;

my $online = $self->{opts}->{online};

my $storecfg = $self->{storecfg} = PVE::Storage::config();

my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

my $version = PVE::QemuServer::Helpers::get_node_pvecfg_version($self->{node});
my $cloudinit_config = $conf->{cloudinit};

PVE::QemuConfig->has_cloudinit($conf) && defined($cloudinit_config)
&& scalar(keys %$cloudinit_config) > 0
&& !PVE::QemuServer::Helpers::pvecfg_min_version($version, 7, 2, 13)
die "target node is too old (manager <= 7.2-13) and doesn't support new cloudinit section\n";

my $repl_conf = PVE::ReplicationConfig->new();
$self->{replication_jobcfg} = $repl_conf->find_local_replication_job($vmid, $self->{node});
$self->{is_replicated} = $repl_conf->check_for_existing_jobs($vmid, 1);

if ($self->{replication_jobcfg} && defined($self->{replication_jobcfg}->{remove_job})) {
die "refusing to migrate replicated VM whose replication job is marked for removal\n";

PVE::QemuConfig->check_lock($conf);

if (my $pid = PVE::QemuServer::check_running($vmid)) {
die "can't migrate running VM without --online\n" if !$online;

if ($self->{is_replicated} && !$self->{replication_jobcfg}) {
if ($self->{opts}->{force}) {
$self->log('warn', "WARNING: Node '$self->{node}' is not a replication target. Existing " .
"replication jobs will fail after migration!\n");
die "Cannot live-migrate replicated VM to node '$self->{node}' - not a replication " .
"target. Use 'force' to override.\n";

$self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);

# To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
# Since the parameter itself contains no reference to a custom model,
# this makes migration independent of changes to "cpu-models.conf".
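# Illustrative note (not from the original source): with e.g. "cpu: custom-mymodel"
# the already expanded '-cpu' argument is read back from the running source VM via
# get_cpu_from_running_vm() below and later passed to the target with '--force-cpu',
# so the target can start the VM without needing the same custom model definition.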
my $cpuconf = PVE::JSONSchema::parse_property_string('pve-cpu-conf', $conf->{cpu});
if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
$self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);

# Do not treat a suspended VM as paused, as it might wake up
# during migration and remain paused after migration finishes.
$self->{vm_was_paused} = 1 if PVE::QemuServer::vm_is_paused($vmid, 0);

my ($loc_res, $mapped_res, $missing_mappings_by_node) = PVE::QemuServer::check_local_resources($conf, 1);
my $blocking_resources = [];
for my $res ($loc_res->@*) {
if (!grep { $_ eq $res } $mapped_res->@*) {
push $blocking_resources->@*, $res;

if (scalar($blocking_resources->@*)) {
if ($self->{running} || !$self->{opts}->{force}) {
die "can't migrate VM which uses local devices: " . join(", ", $blocking_resources->@*) . "\n";
$self->log('info', "migrating VM which uses local devices");

if (scalar($mapped_res->@*)) {
my $missing_mappings = $missing_mappings_by_node->{$self->{node}};
die "can't migrate running VM which uses mapped devices: " . join(", ", $mapped_res->@*) . "\n";
} elsif (scalar($missing_mappings->@*)) {
die "can't migrate to '$self->{node}': missing mapped devices " . join(", ", $missing_mappings->@*) . "\n";
$self->log('info', "migrating VM which uses mapped local devices");

my $vollist = PVE::QemuServer::get_vm_volumes($conf);

foreach my $volid (@$vollist) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

# check if storage is available on source node
my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

my $targetsid = $sid;
# NOTE: local ignores shared mappings, remote maps them
if (!$scfg->{shared} || $self->{opts}->{remote}) {
$targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);

$storages->{$targetsid} = 1;

$self->target_storage_check_available($storecfg, $targetsid, $volid);

if ($scfg->{shared}) {
# PVE::Storage::activate_storage checks this for non-shared storages
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
warn "Used shared storage '$sid' is not online on source node!\n"
if !$plugin->check_connection($sid, $scfg);

if ($self->{opts}->{remote}) {
# test & establish websocket connection
my $bridges = map_bridges($conf, $self->{opts}->{bridgemap}, 1);
my $tunnel = $self->fork_websocket_tunnel($storages, $bridges);
my $min_version = $tunnel->{version} - $tunnel->{age};
$self->log('info', "local WS tunnel version: $WS_TUNNEL_VERSION");
$self->log('info', "remote WS tunnel version: $tunnel->{version}");
$self->log('info', "minimum required WS tunnel version: $min_version");
die "Remote tunnel endpoint not compatible, upgrade required\n"
if $WS_TUNNEL_VERSION < $min_version;
die "Remote tunnel endpoint too old, upgrade required\n"
if $WS_TUNNEL_VERSION > $tunnel->{version};

print "websocket tunnel started\n";
$self->{tunnel} = $tunnel;

# test ssh connection
my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
eval { $self->cmd_quiet($cmd); };
die "Can't connect to destination address using public key\n" if $@;
sub scan_local_volumes {
my ($self, $vmid) = @_;

my $conf = $self->{vmconf};

# local volumes which have been copied
# and their old_id => new_id pairs
$self->{volume_map} = {};
$self->{local_volumes} = {};

my $storecfg = $self->{storecfg};

# found local volumes and their origin
my $local_volumes = $self->{local_volumes};
my $local_volumes_errors = {};
my $other_errors = [];

my $path_to_volid = {};

my $log_error = sub {
my ($msg, $volid) = @_;

if (defined($volid)) {
$local_volumes_errors->{$volid} = $msg;
push @$other_errors, $msg;

my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
: PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
foreach my $volid (keys %{$replicatable_volumes}) {
$local_volumes->{$volid}->{replicated} = 1;

my $test_volid = sub {
my ($volid, $attr) = @_;

if ($volid =~ m|^/|) {
return if $attr->{shared};
$local_volumes->{$volid}->{ref} = 'config';
die "local file/device\n";

my $snaprefs = $attr->{referenced_in_snapshot};

if ($attr->{cdrom}) {
if ($volid eq 'cdrom') {
my $msg = "can't migrate local cdrom drive";
if (defined($snaprefs) && !$attr->{is_attached}) {
my $snapnames = join(', ', sort keys %$snaprefs);
$msg .= " (referenced in snapshot - $snapnames)";
&$log_error("$msg\n");

return if $volid eq 'none';

my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

my $targetsid = $sid;
# NOTE: local ignores shared mappings, remote maps them
if (!$scfg->{shared} || $self->{opts}->{remote}) {
$targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);

$self->target_storage_check_available($storecfg, $targetsid, $volid);
return if $scfg->{shared} && !$self->{opts}->{remote};

$local_volumes->{$volid}->{ref} = 'pending' if $attr->{referenced_in_pending};
$local_volumes->{$volid}->{ref} = 'snapshot' if $attr->{referenced_in_snapshot};
$local_volumes->{$volid}->{ref} = 'unused' if $attr->{is_unused};
$local_volumes->{$volid}->{ref} = 'attached' if $attr->{is_attached};
$local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};

$local_volumes->{$volid}->{bwlimit} = $self->get_bwlimit($sid, $targetsid);
$local_volumes->{$volid}->{targetsid} = $targetsid;

$local_volumes->{$volid}->@{qw(size format)} = PVE::Storage::volume_size_info($storecfg, $volid);

$local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;

$local_volumes->{$volid}->{drivename} = $attr->{drivename}
if $attr->{drivename};

# If with_snapshots is not set for storage migrate, it tries to use
# a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
# back to qcow2 is currently not possible.
$local_volumes->{$volid}->{snapshots} = ($local_volumes->{$volid}->{format} =~ /^(?:qcow2|vmdk)$/);

if ($attr->{cdrom}) {
if ($volid =~ /vm-\d+-cloudinit/) {
$local_volumes->{$volid}->{ref} = 'generated';
die "local cdrom image\n";

my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

die "owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $vmid);

$path_to_volid->{$path}->{$volid} = 1;

return if $attr->{is_vmstate};

if (defined($snaprefs)) {
$local_volumes->{$volid}->{snapshots} = 1;

# we cannot migrate snapshots on local storage
# exceptions: 'zfspool' or 'qcow2' files (on directory storage)

die "online storage migration not possible if non-replicated snapshot exists\n"
if $self->{running} && !$local_volumes->{$volid}->{replicated};

die "remote migration with snapshots not supported yet\n" if $self->{opts}->{remote};

if (!($scfg->{type} eq 'zfspool'
|| ($scfg->{type} eq 'btrfs' && $local_volumes->{$volid}->{format} eq 'raw')
|| $local_volumes->{$volid}->{format} eq 'qcow2'
die "non-migratable snapshot exists\n";

die "referenced by linked clone(s)\n"
if PVE::Storage::volume_is_base_and_used($storecfg, $volid);

PVE::QemuServer::foreach_volid($conf, sub {
my ($volid, $attr) = @_;
eval { $test_volid->($volid, $attr); };
&$log_error($err, $volid);

for my $path (keys %$path_to_volid) {
my @volids = keys $path_to_volid->{$path}->%*;
die "detected not supported aliased volumes: '" . join("', '", @volids) . "'\n"
if (scalar(@volids) > 1);
foreach my $vol (sort keys %$local_volumes) {
my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
my $ref = $local_volumes->{$vol}->{ref};
if ($ref eq 'attached') {
&$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
if $self->{running} && !$self->{opts}->{"with-local-disks"};
$self->log('info', "found $type disk '$vol' (attached)\n");
} elsif ($ref eq 'unused') {
$self->log('info', "found $type disk '$vol' (unused)\n");
} elsif ($ref eq 'snapshot') {
$self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
} elsif ($ref eq 'pending') {
$self->log('info', "found $type disk '$vol' (pending change)\n");
} elsif ($ref eq 'generated') {
$self->log('info', "found generated disk '$vol' (in current VM config)\n");
$self->log('info', "found $type disk '$vol'\n");

foreach my $vol (sort keys %$local_volumes_errors) {
$self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
foreach my $err (@$other_errors) {
$self->log('warn', "$err");
die "can't migrate VM - check log\n";

# additional checks for local storage
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);

my $migratable = $scfg->{type} =~ /^(?:dir|btrfs|zfspool|lvmthin|lvm)$/;

# TODO: what is this even here for?
$migratable = 1 if $self->{opts}->{remote};

die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"

# image is a linked clone on local storage, so we can't migrate.
if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
die "can't migrate '$volid' as it's a clone of '$basename'";

foreach my $volid (sort keys %$local_volumes) {
my $ref = $local_volumes->{$volid}->{ref};
if ($self->{running} && $ref eq 'attached') {
$local_volumes->{$volid}->{migration_mode} = 'online';
} elsif ($self->{running} && $ref eq 'generated') {
# offline migrate the cloud-init ISO and don't regenerate on VM start

# tpmstate will also be offline migrated first, and in case of
# live migration then updated by QEMU/swtpm if necessary
$local_volumes->{$volid}->{migration_mode} = 'offline';
$local_volumes->{$volid}->{migration_mode} = 'offline';

die "Problem found while scanning volumes - $@" if $@;
sub handle_replication {
my ($self, $vmid) = @_;

my $conf = $self->{vmconf};
my $local_volumes = $self->{local_volumes};

return if !$self->{replication_jobcfg};

die "can't migrate VM with replicated volumes to remote cluster/node\n"
if $self->{opts}->{remote};

if ($self->{running}) {

my $version = PVE::QemuServer::kvm_user_version();
if (!min_version($version, 4, 2)) {
die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n"

my @live_replicatable_volumes = $self->filter_local_volumes('online', 1);
foreach my $volid (@live_replicatable_volumes) {
my $drive = $local_volumes->{$volid}->{drivename};
die "internal error - no drive for '$volid'\n" if !defined($drive);

my $bitmap = "repl_$drive";

# start tracking before replication to get full delta + a few duplicates
$self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);
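# Presumably the bitmap records every block written from this point on:
# run_replication() below ships a consistent state of the disk, and the live
# mirroring in phase 2 then only needs to copy the blocks marked dirty in the
# bitmap instead of the whole disk.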
# other info comes from target node in phase 2
$self->{target_drive}->{$drive}->{bitmap} = $bitmap;

$self->log('info', "replicating disk images");

my $start_time = time();
my $logfunc = sub { $self->log('info', shift) };
my $actual_replicated_volumes = PVE::Replication::run_replication(
'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);

my @replicated_volumes = $self->filter_local_volumes(undef, 1);
foreach my $volid (@replicated_volumes) {
die "expected volume '$volid' to get replicated, but it wasn't\n"
if !$actual_replicated_volumes->{$volid};

sub config_update_local_disksizes {

my $conf = $self->{vmconf};
my $local_volumes = $self->{local_volumes};

PVE::QemuConfig->foreach_volume($conf, sub {
my ($key, $drive) = @_;
# skip special disks, will be handled later
return if $key eq 'efidisk0';
return if $key eq 'tpmstate0';

my $volid = $drive->{file};
return if !defined($local_volumes->{$volid}); # only update sizes for local volumes

my ($updated, $msg) = PVE::QemuServer::Drive::update_disksize($drive, $local_volumes->{$volid}->{size});
if (defined($updated)) {
$conf->{$key} = PVE::QemuServer::print_drive($updated);
$self->log('info', "drive '$key': $msg");

# we want to set the efidisk size in the config to the size of the real
# OVMF_VARS.fd image, otherwise we might create an image that is too big, which does not work
if (defined($conf->{efidisk0})) {
PVE::QemuServer::update_efidisk_size($conf);

# TPM state might have an irregular filesize, to avoid problems on transfer
# we always assume the static size of 4M to allocate on the target
if (defined($conf->{tpmstate0})) {
PVE::QemuServer::update_tpmstate_size($conf);

sub filter_local_volumes {
my ($self, $migration_mode, $replicated) = @_;

my $volumes = $self->{local_volumes};

foreach my $volid (sort keys %{$volumes}) {
next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
push @filtered_volids, $volid;

return @filtered_volids;
sub sync_offline_local_volumes {

my $local_volumes = $self->{local_volumes};
my @volids = $self->filter_local_volumes('offline', 0);

my $storecfg = $self->{storecfg};
my $opts = $self->{opts};

$self->log('info', "copying local disk images") if scalar(@volids);

foreach my $volid (@volids) {

my $opts = $self->{opts};
if ($opts->{remote}) {

my ($level, $msg) = @_;
$self->log($level, $msg);

$new_volid = PVE::StorageTunnel::storage_migrate(
$opts->{remote}->{vmid},
$local_volumes->{$volid},

my $targetsid = $local_volumes->{$volid}->{targetsid};

my $bwlimit = $local_volumes->{$volid}->{bwlimit};
$bwlimit = $bwlimit * 1024 if defined($bwlimit); # storage_migrate uses bps

my $storage_migrate_opts = {
'ratelimit_bps' => $bwlimit,
'insecure' => $opts->{migration_type} eq 'insecure',
'with_snapshots' => $local_volumes->{$volid}->{snapshots},
'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},

my $logfunc = sub { $self->log('info', $_[0]); };

PVE::Storage::storage_migrate(
$storage_migrate_opts,

die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";

$self->{volume_map}->{$volid} = $new_volid;
$self->log('info', "volume '$volid' is '$new_volid' on the target\n");

eval { PVE::Storage::deactivate_volumes($storecfg, [$volid]); };

$self->log('warn', $err);
sub cleanup_remotedisks {

if ($self->{opts}->{remote}) {
PVE::Tunnel::finish_tunnel($self->{tunnel}, 1);
delete $self->{tunnel};

my $local_volumes = $self->{local_volumes};

foreach my $volid (values %{$self->{volume_map}}) {
# don't clean up replicated disks!
next if $local_volumes->{$volid}->{replicated};

my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);

my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
$self->log('err', $err);

sub cleanup_bitmaps {

foreach my $drive (keys %{$self->{target_drive}}) {
my $bitmap = $self->{target_drive}->{$drive}->{bitmap};

$self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);

my ($self, $vmid) = @_;

$self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

my $conf = $self->{vmconf};

# set migrate lock in config file
$conf->{lock} = 'migrate';
PVE::QemuConfig->write_config($vmid, $conf);

$self->scan_local_volumes($vmid);

# fix disk sizes to match their actual size and write changes,
# so that the target allocates the correct volumes
$self->config_update_local_disksizes();
PVE::QemuConfig->write_config($vmid, $conf);

$self->handle_replication($vmid);

$self->sync_offline_local_volumes();
$self->phase1_remote($vmid) if $self->{opts}->{remote};

my ($conf, $map, $scan_only) = @_;

foreach my $opt (keys %$conf) {
next if $opt !~ m/^net\d+$/;

next if !$conf->{$opt};
my $d = PVE::QemuServer::parse_net($conf->{$opt});
next if !$d || !$d->{bridge};

my $target_bridge = PVE::JSONSchema::map_id($map, $d->{bridge});
$bridges->{$target_bridge}->{$opt} = $d->{bridge};

$d->{bridge} = $target_bridge;
$conf->{$opt} = PVE::QemuServer::print_net($d);
my ($self, $vmid) = @_;

my $remote_conf = PVE::QemuConfig->load_config($vmid);
PVE::QemuConfig->update_volume_ids($remote_conf, $self->{volume_map});

my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
for my $target (keys $bridges->%*) {
for my $nic (keys $bridges->{$target}->%*) {
$self->log('info', "mapped: $nic from $bridges->{$target}->{$nic} to $target");

my @online_local_volumes = $self->filter_local_volumes('online');

my $storage_map = $self->{opts}->{storagemap};

PVE::QemuConfig->foreach_volume($remote_conf, sub {
my ($ds, $drive) = @_;

return if PVE::QemuServer::drive_is_cdrom($drive);

my $volid = $drive->{file};

return if !grep { $_ eq $volid } @online_local_volumes;

my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
my $source_format = PVE::QemuServer::qemu_img_format($scfg, $volname);

# set by target cluster
my $oldvolid = delete $drive->{file};
delete $drive->{format};

my $targetsid = PVE::JSONSchema::map_id($storage_map, $storeid);

format => $source_format,
storage => $targetsid,

$self->log('info', "Allocating volume for drive '$ds' on remote storage '$targetsid'..");
my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'disk', $params);

$self->log('info', "volume '$oldvolid' is '$res->{volid}' on the target\n");
$remote_conf->{$ds} = $res->{drivestr};
$self->{nbd}->{$ds} = $res;

my $conf_str = PVE::QemuServer::write_vm_config("remote", $remote_conf);

# TODO expose in PVE::Firewall?
my $vm_fw_conf_path = "/etc/pve/firewall/$vmid.fw";

$fw_conf_str = PVE::Tools::file_get_contents($vm_fw_conf_path)
if -e $vm_fw_conf_path;

'firewall-config' => $fw_conf_str,

PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'config', $params);

my ($self, $vmid, $err) = @_;

$self->log('info', "aborting phase 1 - cleanup resources");

my $conf = $self->{vmconf};
delete $conf->{lock};
eval { PVE::QemuConfig->write_config($vmid, $conf) };
$self->log('err', $err);

eval { $self->cleanup_remotedisks() };
$self->log('err', $err);

eval { $self->cleanup_bitmaps() };
$self->log('err', $err);
sub phase2_start_local_cluster {
my ($self, $vmid, $params) = @_;

my $conf = $self->{vmconf};
my $local_volumes = $self->{local_volumes};
my @online_local_volumes = $self->filter_local_volumes('online');

my $start = $params->{start_params};
my $migrate = $params->{migrate_opts};

$self->log('info', "starting VM $vmid on remote node '$self->{node}'");

my $tunnel_info = {};

## start on remote node
my $cmd = [@{$self->{rem_ssh}}];

push @$cmd, 'qm', 'start', $vmid;

if ($start->{skiplock}) {
push @$cmd, '--skiplock';

push @$cmd, '--migratedfrom', $migrate->{migratedfrom};

push @$cmd, '--migration_type', $migrate->{type};

push @$cmd, '--migration_network', $migrate->{network}
if $migrate->{network};

push @$cmd, '--stateuri', $start->{statefile};

if ($start->{forcemachine}) {
push @$cmd, '--machine', $start->{forcemachine};

if ($start->{forcecpu}) {
push @$cmd, '--force-cpu', $start->{forcecpu};

if ($self->{storage_migration}) {
push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');

my $input = "nbd_protocol_version: $migrate->{nbd_proto_version}\n";

my @offline_local_volumes = $self->filter_local_volumes('offline');
for my $volid (@offline_local_volumes) {
my $drivename = $local_volumes->{$volid}->{drivename};
next if !$drivename || !$conf->{$drivename};

my $new_volid = $self->{volume_map}->{$volid};
next if !$new_volid || $volid eq $new_volid;

# FIXME PVE 8.x only use offline_volume variant once all targets can handle it
if ($drivename eq 'tpmstate0') {
$input .= "$drivename: $new_volid\n"
$input .= "offline_volume: $drivename: $new_volid\n"

$input .= "spice_ticket: $migrate->{spice_ticket}\n" if $migrate->{spice_ticket};

my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
foreach my $volid (@online_replicated_volumes) {
$input .= "replicated_volume: $volid\n";

my $handle_storage_migration_listens = sub {
my ($drive_key, $drivestr, $nbd_uri) = @_;

$self->{stopnbd} = 1;
$self->{target_drive}->{$drive_key}->{drivestr} = $drivestr;
$self->{target_drive}->{$drive_key}->{nbd_uri} = $nbd_uri;

my $source_drive = PVE::QemuServer::parse_drive($drive_key, $conf->{$drive_key});
my $target_drive = PVE::QemuServer::parse_drive($drive_key, $drivestr);
my $source_volid = $source_drive->{file};
my $target_volid = $target_drive->{file};

$self->{volume_map}->{$source_volid} = $target_volid;
$self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");

my $target_replicated_volumes = {};

# Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
# instead we pipe it through STDIN
my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {

if ($line =~ m/^migration listens on (tcp):(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
$tunnel_info->{addr} = $2;
$tunnel_info->{port} = int($3);
$tunnel_info->{proto} = $1;
elsif ($line =~ m!^migration listens on (unix):(/run/qemu-server/(\d+)\.migrate)$!) {
$tunnel_info->{addr} = $2;
die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $3;
$tunnel_info->{proto} = $1;
elsif ($line =~ m/^migration listens on port (\d+)$/) {
$tunnel_info->{addr} = "localhost";
$tunnel_info->{port} = int($1);
$tunnel_info->{proto} = "tcp";
elsif ($line =~ m/^spice listens on port (\d+)$/) {
$spice_port = int($1);
elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {

my $nbd_uri = "nbd:$1:$2:exportname=$3";
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;

$handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
} elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {

die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
my $nbd_unix_addr = $1;
my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;

$handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
$tunnel_info->{unix_sockets}->{$nbd_unix_addr} = 1;
} elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
$target_replicated_volumes->{$volid} = $drive;
} elsif ($line =~ m/^QEMU: (.*)$/) {
$self->log('info', "[$self->{node}] $1\n");

$self->log('info', "[$self->{node}] $line");

die "remote command failed with exit code $exitcode\n" if $exitcode;

die "unable to detect remote migration address\n" if !$tunnel_info->{addr} || !$tunnel_info->{proto};

if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
die "number of replicated disks on source and target node do not match - target node too old?\n"

return ($tunnel_info, $spice_port);
sub phase2_start_remote_cluster {
my ($self, $vmid, $params) = @_;

die "insecure migration to remote cluster not implemented\n"
if $params->{migrate_opts}->{type} ne 'websocket';

my $remote_vmid = $self->{opts}->{remote}->{vmid};

# like regular start but with some overhead accounted for
my $memory = get_current_memory($self->{vmconf}->{memory});
my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}, $memory) + 10;

my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params);

foreach my $drive (keys %{$res->{drives}}) {
$self->{stopnbd} = 1;
$self->{target_drive}->{$drive}->{drivestr} = $res->{drives}->{$drive}->{drivestr};
my $nbd_uri = $res->{drives}->{$drive}->{nbd_uri};
die "unexpected NBD uri for '$drive': $nbd_uri\n"
if $nbd_uri !~ s!/run/qemu-server/$remote_vmid\_!/run/qemu-server/$vmid\_!;

$self->{target_drive}->{$drive}->{nbd_uri} = $nbd_uri;

return ($res->{migrate}, $res->{spice_port});

my ($self, $vmid) = @_;

my $conf = $self->{vmconf};
my $local_volumes = $self->{local_volumes};

# version > 0 for unix socket support
my $nbd_protocol_version = 1;

if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
my $res = mon_cmd($vmid, 'query-spice');
$spice_ticket = $res->{ticket};

my $migration_type = $self->{opts}->{migration_type};
my $state_uri = $migration_type eq 'insecure' ? 'tcp' : 'unix';

statefile => $state_uri,
forcemachine => $self->{forcemachine},
forcecpu => $self->{forcecpu},

spice_ticket => $spice_ticket,
type => $migration_type,
network => $self->{opts}->{migration_network},
storagemap => $self->{opts}->{storagemap},
migratedfrom => PVE::INotify::nodename(),
nbd_proto_version => $nbd_protocol_version,
nbd => $self->{nbd},

my ($tunnel_info, $spice_port);

my @online_local_volumes = $self->filter_local_volumes('online');
$self->{storage_migration} = 1 if scalar(@online_local_volumes);

if (my $remote = $self->{opts}->{remote}) {
my $remote_vmid = $remote->{vmid};
$params->{migrate_opts}->{remote_node} = $self->{node};
($tunnel_info, $spice_port) = $self->phase2_start_remote_cluster($vmid, $params);
die "only UNIX sockets are supported for remote migration\n"
if $tunnel_info->{proto} ne 'unix';

my $remote_socket = $tunnel_info->{addr};
my $local_socket = $remote_socket;
$local_socket =~ s/$remote_vmid/$vmid/g;
$tunnel_info->{addr} = $local_socket;

$self->log('info', "Setting up tunnel for '$local_socket'");
PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);

foreach my $remote_socket (@{$tunnel_info->{unix_sockets}}) {
my $local_socket = $remote_socket;
$local_socket =~ s/$remote_vmid/$vmid/g;
next if $self->{tunnel}->{forwarded}->{$local_socket};
$self->log('info', "Setting up tunnel for '$local_socket'");
PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);

($tunnel_info, $spice_port) = $self->phase2_start_local_cluster($vmid, $params);

$self->log('info', "start remote tunnel");
$self->start_remote_tunnel($tunnel_info);

my $migrate_uri = "$tunnel_info->{proto}:$tunnel_info->{addr}";
$migrate_uri .= ":$tunnel_info->{port}"
if defined($tunnel_info->{port});

if ($self->{storage_migration}) {
$self->{storage_migration_jobs} = {};
$self->log('info', "starting storage migration");

die "The number of local disks does not match between the source and the destination.\n"
if (scalar(keys %{$self->{target_drive}}) != scalar(@online_local_volumes));
foreach my $drive (keys %{$self->{target_drive}}) {
my $target = $self->{target_drive}->{$drive};
my $nbd_uri = $target->{nbd_uri};

my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
my $source_volid = $source_drive->{file};

my $bwlimit = $self->{local_volumes}->{$source_volid}->{bwlimit};
my $bitmap = $target->{bitmap};

$self->log('info', "$drive: start migration to $nbd_uri");
PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);

$self->log('info', "starting online/live migration on $migrate_uri");
$self->{livemigration} = 1;

my $defaults = PVE::QemuServer::load_defaults();

$self->log('info', "set migration capabilities");
eval { PVE::QemuServer::set_migration_caps($vmid) };

my $qemu_migrate_params = {};

# migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
# migrate_speed parameter in qm.conf - take the lower of the two.
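# Illustrative example with assumed numbers: a datacenter/API bwlimit of
# 102400 KB/s and "migrate_speed: 200" (MB/s, i.e. 204800 KB/s) in the VM config
# result in an effective limit of 102400 KB/s, which is converted to B/s for QMP
# further down.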
my $bwlimit = $self->get_bwlimit();

my $migrate_speed = $conf->{migrate_speed} // 0;
$migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s

if ($bwlimit && $migrate_speed) {
$migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
$migrate_speed ||= $bwlimit;

$migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;

if ($migrate_speed) {
$migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
$self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");

# always set the migrate speed: QEMU defaults to 128 MiBps == 1 Gbps, we use 16 GiBps == 128 Gbps
$migrate_speed = (16 << 30);

$qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

my $migrate_downtime = $defaults->{migrate_downtime};
$migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
# migrate-set-parameters expects limit in ms
$migrate_downtime *= 1000;
$self->log('info', "migration downtime limit: $migrate_downtime ms");
$qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);

# set cachesize to 10% of the total memory
my $memory = get_current_memory($conf->{memory});
my $cachesize = int($memory * 1048576 / 10);
$cachesize = round_powerof2($cachesize);
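# Worked example (assumed memory size): for a VM with 4096 MiB of memory the
# cache starts at int(4096 * 1048576 / 10), roughly 409.6 MiB, and round_powerof2()
# rounds it up to the next power of two, 512 MiB, before it is passed to QMP
# as 'xbzrle-cache-size' below.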
$self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
$qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

$self->log('info', "set migration parameters");

mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});

$self->log('info', "migrate-set-parameters error: $@") if $@;

if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();

my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

$self->log('info', "spice client_migrate_info");

mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
'cert-subject' => $subject);

$self->log('info', "client_migrate_info error: $@") if $@;

$self->log('info', "start migrate command to $migrate_uri");

mon_cmd($vmid, "migrate", uri => $migrate_uri);

$self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;

my $last_mem_transferred = 0;
my $usleep = 1000000;

my $lastrem = undef;
my $downtimecounter = 0;

my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;

my $stat = eval { mon_cmd($vmid, "query-migrate") };

warn "query migrate failed: $err\n";
$self->log('info', "query migrate failed: $err");
if ($err_count <= 5) {

die "too many query migrate failures - aborting\n";

my $status = $stat->{status};
if (defined($status) && $status =~ m/^(setup)$/im) {

if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {

die "unable to parse migration status '$status' - aborting\n";

my $memstat = $stat->{ram};

if ($status eq 'completed') {
my $delay = time() - $start;

my $total = $memstat->{total} || 0;
my $avg_speed = render_bytes($total / $delay, 1);
my $downtime = $stat->{downtime} || 0;
$self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");

if ($status eq 'failed' || $status eq 'cancelled') {
my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
$self->log('info', "migration status error: $message");

if ($status ne 'active') {
$self->log('info', "migration status: $status");

if ($memstat->{transferred} ne $last_mem_transferred) {
my $trans = $memstat->{transferred} || 0;
my $rem = $memstat->{remaining} || 0;
my $total = $memstat->{total} || 0;
my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);

# reduce sleep if remaining memory is lower than the average transfer speed
$usleep = 100_000 if $avglstat && $rem < $avglstat;

# also reduce logging if we poll more frequently
my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;

my $total_h = render_bytes($total, 1);
my $transferred_h = render_bytes($trans, 1);
my $speed_h = render_bytes($speed, 1);

my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";

if ($dirty_rate > $speed) {
my $dirty_rate_h = render_bytes($dirty_rate, 1);
$progress .= ", VM dirties lots of memory: $dirty_rate_h/s";

$self->log('info', "migration $status, $progress") if $should_log;

my $xbzrle = $stat->{"xbzrle-cache"} || {};
my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
if ($xbzrlebytes || $xbzrlepages) {
my $bytes_h = render_bytes($xbzrlebytes, 1);

my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";

$msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
if $xbzrle->{'cache-miss-rate'};

$msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};

$self->log('info', "xbzrle: $msg") if $should_log;

if (($lastrem && $rem > $lastrem) || ($rem == 0)) {

if ($downtimecounter > 5) {
$downtimecounter = 0;
$migrate_downtime *= 2;
$self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");

# migrate-set-parameters does not touch values not
# specified, so this only changes downtime-limit
mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));

$self->log('info', "migrate-set-parameters error: $@") if $@;

$last_mem_transferred = $memstat->{transferred};

if ($self->{storage_migration}) {
# finish block-job with block-job-cancel, to disconnect source VM from NBD
# to avoid it trying to re-establish it. We are in blockjob ready state,
# thus, this command changes it to blockjob complete (see qapi docs)
eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };

die "Failed to complete storage migration: $err\n";
sub phase2_cleanup {
my ($self, $vmid, $err) = @_;

return if !$self->{errors};
$self->{phase2errors} = 1;

$self->log('info', "aborting phase 2 - cleanup resources");

$self->log('info', "migrate_cancel");

mon_cmd($vmid, "migrate_cancel");

$self->log('info', "migrate_cancel error: $@") if $@;

my $vm_status = eval {
mon_cmd($vmid, 'query-status')->{status} or die "no 'status' in result\n";

$self->log('err', "query-status error: $@") if $@;

# Can end up in POSTMIGRATE state if failure occurred after convergence. Try going back to
# original state. Unfortunately, direct transition from POSTMIGRATE to PAUSED is not possible.
if ($vm_status && $vm_status eq 'postmigrate') {
if (!$self->{vm_was_paused}) {
eval { mon_cmd($vmid, 'cont'); };
$self->log('err', "resuming VM failed: $@") if $@;

$self->log('err', "VM was paused, but ended in postmigrate state");

my $conf = $self->{vmconf};
delete $conf->{lock};
eval { PVE::QemuConfig->write_config($vmid, $conf) };
$self->log('err', $err);

# cleanup resources on target host
if ($self->{storage_migration}) {
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
$self->log('err', $err);

eval { $self->cleanup_bitmaps() };
$self->log('err', $err);

my $nodename = PVE::INotify::nodename();

if ($self->{tunnel} && $self->{tunnel}->{version} >= 2) {
PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'stop');

my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
$self->log('err', $err);
$self->{errors} = 1;

# cleanup after stopping, otherwise disks might be in-use by target VM!
eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
$self->log('err', $err);

if ($self->{tunnel}) {
eval { PVE::Tunnel::finish_tunnel($self->{tunnel}); };
$self->log('err', $err);
$self->{errors} = 1;
my ($self, $vmid) = @_;

sub phase3_cleanup {
my ($self, $vmid, $err) = @_;

my $conf = $self->{vmconf};
return if $self->{phase2errors};

my $tunnel = $self->{tunnel};

if ($self->{volume_map} && !$self->{opts}->{remote}) {
my $target_drives = $self->{target_drive};

# FIXME: for NBD storage migration we now only update the volid, and
# not the full drivestr from the target node. Workaround that until we
# got some real rescan, to avoid things like wrong format in the drive
delete $conf->{$_} for keys %$target_drives;
PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});

for my $drive (keys %$target_drives) {
$conf->{$drive} = $target_drives->{$drive}->{drivestr};

PVE::QemuConfig->write_config($vmid, $conf);

# transfer replication state before move config
if (!$self->{opts}->{remote}) {
$self->transfer_replication_state() if $self->{is_replicated};
PVE::QemuConfig->move_config_to_node($vmid, $self->{node});
$self->switch_replication_job_target() if $self->{is_replicated};

if ($self->{livemigration}) {
if ($self->{stopnbd}) {
$self->log('info', "stopping NBD storage migration server on target.");
# stop nbd server on remote vm - requirement for resume since 2.9
if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 2) {

PVE::Tunnel::write_tunnel($tunnel, 30, 'nbdstop');

$self->log('err', $err);
$self->{errors} = 1;

my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
$self->log('err', $err);
$self->{errors} = 1;

# deletes local FDB entries if learning is disabled, they'll be re-added on target on resume
PVE::QemuServer::del_nets_bridge_fdb($conf, $vmid);

if (!$self->{vm_was_paused}) {
# config moved and nbd server stopped - now we can resume vm on target
if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
my $cmd = $tunnel->{version} == 1 ? "resume $vmid" : "resume";

PVE::Tunnel::write_tunnel($tunnel, 30, $cmd);

$self->log('err', $err);
$self->{errors} = 1;

# nocheck in case target node hasn't processed the config move/rename yet
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];

$self->log('err', $line);

eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };

$self->log('err', $err);
$self->{errors} = 1;

$self->{storage_migration}
&& PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}

if (!$self->{vm_was_paused}) {
$self->log('info', "issuing guest fstrim");
if ($self->{opts}->{remote}) {
PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'fstrim');

my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };

$self->log('err', "fstrim failed - $err");
$self->{errors} = 1;

$self->log('info', "skipping guest fstrim, because VM is paused");

# close tunnel on successful migration, on error phase2_cleanup closed it
if ($tunnel && $tunnel->{version} == 1) {
eval { PVE::Tunnel::finish_tunnel($tunnel); };

$self->log('err', $err);
$self->{errors} = 1;

delete $self->{tunnel};

if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
$self->log('info', "Waiting for spice server migration");

my $res = mon_cmd($vmid, 'query-spice');
last if int($res->{'migrated'}) == 1;
last if $timer > 50;

# always stop local VM with nocheck, since config is moved already
eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };

$self->log('err', "stopping vm failed - $err");
$self->{errors} = 1;

# always deactivate volumes - avoid LVM LVs being active on several nodes

my $vollist = PVE::QemuServer::get_vm_volumes($conf);
PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);

$self->log('err', $err);
$self->{errors} = 1;

my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);

# destroy local copies
foreach my $volid (@not_replicated_volumes) {
# remote is cleaned up below
next if $self->{opts}->{remote};

eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };

$self->log('err', "removing local copy of '$volid' failed - $err");
$self->{errors} = 1;
last if $err =~ /^interrupted by signal$/;

# clear migrate lock
if ($tunnel && $tunnel->{version} >= 2) {
PVE::Tunnel::write_tunnel($tunnel, 10, "unlock");

PVE::Tunnel::finish_tunnel($tunnel);

my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");

if ($self->{opts}->{remote} && $self->{opts}->{delete}) {
eval { PVE::QemuServer::destroy_vm($self->{storecfg}, $vmid, 1, undef, 0) };
warn "Failed to remove source VM - $@\n" if $@;

my ($self, $vmid) = @_;

sub round_powerof2 {
return 1 if $_[0] < 2;
return 2 << int(log($_[0]-1)/log(2));
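# e.g. round_powerof2(3) == 4, round_powerof2(4) == 4, round_powerof2(5) == 8;
# i.e. values are rounded up to the next power of two (exact powers stay as-is).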