package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use Time::HiRes qw( usleep );

use PVE::AccessControl;
use PVE::Cluster;
use PVE::Format qw(render_bytes);
use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
use PVE::INotify;
use PVE::JSONSchema;
use PVE::RPCEnvironment;
use PVE::Replication;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Storage;
use PVE::StorageTunnel;
use PVE::Tools;
use PVE::Tunnel;

use PVE::QemuConfig;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer;

use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

# compared against remote end's minimum version
our $WS_TUNNEL_VERSION = 2;

sub fork_tunnel {
    my ($self, $ssh_forward_info) = @_;

    my $cmd = ['/usr/sbin/qm', 'mtunnel'];
    my $log = sub {
        my ($level, $msg) = @_;
        $self->log($level, $msg);
    };

    return PVE::Tunnel::fork_ssh_tunnel($self->{rem_ssh}, $cmd, $ssh_forward_info, $log);
}

sub fork_websocket_tunnel {
    my ($self, $storages, $bridges) = @_;

    my $remote = $self->{opts}->{remote};
    my $conn = $remote->{conn};

    my $log = sub {
        my ($level, $msg) = @_;
        $self->log($level, $msg);
    };

    my $websocket_url = "https://$conn->{host}:$conn->{port}/api2/json/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnelwebsocket";
    my $url = "/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnel";

    my $tunnel_params = {
        url => $websocket_url,
    };

    my $storage_list = join(',', keys %$storages);
    my $bridge_list = join(',', keys %$bridges);

    my $req_params = {
        storages => $storage_list,
        bridges => $bridge_list,
    };

    return PVE::Tunnel::fork_websocket_tunnel($conn, $url, $req_params, $tunnel_params, $log);
}

# tunnel_info:
#   proto: unix (secure) or tcp (insecure/legacy compat)
#   addr: IP or UNIX socket path
#   port: optional TCP port
#   unix_sockets: additional UNIX socket paths to forward
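#
# a minimal sketch of the secure/unix case, with illustrative values only (the
# real ones are parsed from the remote 'qm start' output below):
#   {
#       proto => 'unix',
#       addr => '/run/qemu-server/100.migrate',
#       unix_sockets => { '/run/qemu-server/100_nbd.migrate' => 1 },
#   }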
sub start_remote_tunnel {
    my ($self, $tunnel_info) = @_;

    my $nodename = PVE::INotify::nodename();
    my $migration_type = $self->{opts}->{migration_type};

    if ($migration_type eq 'secure') {

        if ($tunnel_info->{proto} eq 'unix') {
            my $ssh_forward_info = [];

            my $unix_sockets = [ keys %{$tunnel_info->{unix_sockets}} ];
            push @$unix_sockets, $tunnel_info->{addr};
            for my $sock (@$unix_sockets) {
                push @$ssh_forward_info, "$sock:$sock";
                unlink $sock;
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

            # wait for the sockets to become ready (up to 100 * 50 ms = 5 s)
            my $unix_socket_try = 0;
            while ($unix_socket_try <= 100) {
                $unix_socket_try++;
                my $available = 0;
                foreach my $sock (@$unix_sockets) {
                    if (-S $sock) {
                        $available++;
                    }
                }

                if ($available == @$unix_sockets) {
                    last;
                }

                usleep(50000);
            }
            if ($unix_socket_try > 100) {
                $self->{errors} = 1;
                PVE::Tunnel::finish_tunnel($self->{tunnel});
                die "Timeout, migration socket $tunnel_info->{addr} did not get ready";
            }
            $self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);

        } elsif ($tunnel_info->{proto} eq 'tcp') {
            my $ssh_forward_info = [];
            if ($tunnel_info->{addr} eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                push @$ssh_forward_info, "$lport:localhost:$tunnel_info->{port}";
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

        } else {
            die "unsupported protocol in migration URI: $tunnel_info->{proto}\n";
        }
    } else {
        # fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

sub target_storage_check_available {
    my ($self, $storecfg, $targetsid, $volid) = @_;

    if (!$self->{opts}->{remote}) {
        # check if storage is available on target node
        my $target_scfg = PVE::Storage::storage_check_enabled(
            $storecfg,
            $targetsid,
            $self->{node},
        );
        my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
        die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
            if !$target_scfg->{content}->{$vtype};
    }
}

sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    my $storecfg = $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    my $version = PVE::QemuServer::Helpers::get_node_pvecfg_version($self->{node});
    my $cloudinit_config = $conf->{cloudinit};

    if (
        PVE::QemuConfig->has_cloudinit($conf) && defined($cloudinit_config)
        && scalar(keys %$cloudinit_config) > 0
        && !PVE::QemuServer::Helpers::pvecfg_min_version($version, 7, 2, 13)
    ) {
        die "target node is too old (manager <= 7.2-13) and doesn't support new cloudinit section\n";
    }

    my $repl_conf = PVE::ReplicationConfig->new();
    $self->{replication_jobcfg} = $repl_conf->find_local_replication_job($vmid, $self->{node});
    $self->{is_replicated} = $repl_conf->check_for_existing_jobs($vmid, 1);

    if ($self->{replication_jobcfg} && defined($self->{replication_jobcfg}->{remove_job})) {
        die "refusing to migrate replicated VM whose replication job is marked for removal\n";
    }

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        if ($self->{is_replicated} && !$self->{replication_jobcfg}) {
            if ($self->{opts}->{force}) {
                $self->log('warn', "WARNING: Node '$self->{node}' is not a replication target. Existing " .
                    "replication jobs will fail after migration!\n");
            } else {
                die "Cannot live-migrate replicated VM to node '$self->{node}' - not a replication " .
                    "target. Use 'force' to override.\n";
            }
        }

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);

        # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
        # Since the parameter itself contains no reference to a custom model,
        # this makes migration independent of changes to "cpu-models.conf".
        if ($conf->{cpu}) {
            my $cpuconf = PVE::JSONSchema::parse_property_string('pve-cpu-conf', $conf->{cpu});
            if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
                $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
            }
        }

        $self->{vm_was_paused} = 1 if PVE::QemuServer::vm_is_paused($vmid);
    }

    my ($loc_res, $mapped_res, $missing_mappings_by_node) = PVE::QemuServer::check_local_resources($conf, 1);
    my $blocking_resources = [];
    for my $res ($loc_res->@*) {
        # note: the membership test needs a grep block comparing against $_,
        # a bare first argument would only be evaluated for truthiness
        if (!grep { $_ eq $res } $mapped_res->@*) {
            push $blocking_resources->@*, $res;
        }
    }
    if (scalar($blocking_resources->@*)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", $blocking_resources->@*) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    if (scalar($mapped_res->@*)) {
        my $missing_mappings = $missing_mappings_by_node->{$self->{node}};
        if ($running) {
            die "can't migrate running VM which uses mapped devices: " . join(", ", $mapped_res->@*) . "\n";
        } elsif (scalar($missing_mappings->@*)) {
            die "can't migrate to '$self->{node}': missing mapped devices " . join(", ", $missing_mappings->@*) . "\n";
        } else {
            $self->log('info', "migrating VM which uses mapped local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $storages = {};
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on source node
        my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

        my $targetsid = $sid;
        # NOTE: local ignores shared mappings, remote maps them
        if (!$scfg->{shared} || $self->{opts}->{remote}) {
            $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
        }

        $storages->{$targetsid} = 1;

        $self->target_storage_check_available($storecfg, $targetsid, $volid);

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        }
    }

    if ($self->{opts}->{remote}) {
        # test & establish websocket connection
        my $bridges = map_bridges($conf, $self->{opts}->{bridgemap}, 1);
        my $tunnel = $self->fork_websocket_tunnel($storages, $bridges);
        my $min_version = $tunnel->{version} - $tunnel->{age};
        $self->log('info', "local WS tunnel version: $WS_TUNNEL_VERSION");
        $self->log('info', "remote WS tunnel version: $tunnel->{version}");
        $self->log('info', "minimum required WS tunnel version: $min_version");
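        # illustrative example: a remote end reporting version 2 with age 1
        # still accepts a local version 1 (its minimum), while a local version
        # above the remote's own version means the remote end is too old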
        die "Remote tunnel endpoint not compatible, upgrade required\n"
            if $WS_TUNNEL_VERSION < $min_version;
        die "Remote tunnel endpoint too old, upgrade required\n"
            if $WS_TUNNEL_VERSION > $tunnel->{version};

        print "websocket tunnel started\n";
        $self->{tunnel} = $tunnel;
    } else {
        # test ssh connection
        my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
        eval { $self->cmd_quiet($cmd); };
        die "Can't connect to destination address using public key\n" if $@;
    }

    return $running;
}

sub scan_local_volumes {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    # and their old_id => new_id pairs
    $self->{volume_map} = {};
    $self->{local_volumes} = {};

    my $storecfg = $self->{storecfg};
    eval {
        # find local volumes and their origin
        my $local_volumes = $self->{local_volumes};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;
        my $path_to_volid = {};

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
            : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
        foreach my $volid (keys %{$replicatable_volumes}) {
            $local_volumes->{$volid}->{replicated} = 1;
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{is_attached}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

            my $targetsid = $sid;
            # NOTE: local ignores shared mappings, remote maps them
            if (!$scfg->{shared} || $self->{opts}->{remote}) {
                $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
            }

            $self->target_storage_check_available($storecfg, $targetsid, $volid);
            return if $scfg->{shared} && !$self->{opts}->{remote};

            $local_volumes->{$volid}->{ref} = 'pending' if $attr->{referenced_in_pending};
            $local_volumes->{$volid}->{ref} = 'snapshot' if $attr->{referenced_in_snapshot};
            $local_volumes->{$volid}->{ref} = 'unused' if $attr->{is_unused};
            $local_volumes->{$volid}->{ref} = 'attached' if $attr->{is_attached};
            $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};

            $local_volumes->{$volid}->{bwlimit} = $self->get_bwlimit($sid, $targetsid);
            $local_volumes->{$volid}->{targetsid} = $targetsid;

            $local_volumes->{$volid}->@{qw(size format)} = PVE::Storage::volume_size_info($storecfg, $volid);

            $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;

            $local_volumes->{$volid}->{drivename} = $attr->{drivename}
                if $attr->{drivename};

            # If with_snapshots is not set for storage migrate, it tries to use
            # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
            # back to qcow2 is currently not possible.
            $local_volumes->{$volid}->{snapshots} = ($local_volumes->{$volid}->{format} =~ /^(?:qcow2|vmdk)$/);

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $vmid);

            $path_to_volid->{$path}->{$volid} = 1;

            return if $attr->{is_vmstate};

            if (defined($snaprefs)) {
                $local_volumes->{$volid}->{snapshots} = 1;

                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if non-replicated snapshot exists\n"
                    if $self->{running} && !$local_volumes->{$volid}->{replicated};

                die "remote migration with snapshots not supported yet\n" if $self->{opts}->{remote};

                if (!($scfg->{type} eq 'zfspool'
                    || ($scfg->{type} eq 'btrfs' && $local_volumes->{$volid}->{format} eq 'raw')
                    || $local_volumes->{$volid}->{format} eq 'qcow2'
                )) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        for my $path (keys %$path_to_volid) {
            my @volids = keys $path_to_volid->{$path}->%*;
            die "detected unsupported aliased volumes: '" . join("', '", @volids) . "'\n"
                if (scalar(@volids) > 1);
        }

        foreach my $vol (sort keys %$local_volumes) {
            my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'attached') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found $type disk '$vol' (attached)\n");
            } elsif ($ref eq 'unused') {
                $self->log('info', "found $type disk '$vol' (unused)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'pending') {
                $self->log('info', "found $type disk '$vol' (pending change)\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found $type disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($storecfg, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|btrfs|zfspool|lvmthin|lvm)$/;

            # TODO: what is this even here for?
            $migratable = 1 if $self->{opts}->{remote};

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        foreach my $volid (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'attached') {
                $local_volumes->{$volid}->{migration_mode} = 'online';
            } elsif ($self->{running} && $ref eq 'generated') {
                # offline migrate the cloud-init ISO and don't regenerate on VM start
                #
                # tpmstate will also be offline migrated first, and in case of
                # live migration then updated by QEMU/swtpm if necessary
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            } else {
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            }
        }
    };
    die "Problem found while scanning volumes - $@" if $@;
}

sub handle_replication {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    return if !$self->{replication_jobcfg};

    die "can't migrate VM with replicated volumes to remote cluster/node\n"
        if $self->{opts}->{remote};

    if ($self->{running}) {

        my $version = PVE::QemuServer::kvm_user_version();
        if (!min_version($version, 4, 2)) {
            die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n"
        }

        my @live_replicatable_volumes = $self->filter_local_volumes('online', 1);
        foreach my $volid (@live_replicatable_volumes) {
            my $drive = $local_volumes->{$volid}->{drivename};
            die "internal error - no drive for '$volid'\n" if !defined($drive);

            my $bitmap = "repl_$drive";

            # start tracking before replication to get full delta + a few duplicates
            $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
            mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);

            # other info comes from target node in phase 2
            $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
        }
    }
    $self->log('info', "replicating disk images");

    my $start_time = time();
    my $logfunc = sub { $self->log('info', shift) };
    my $actual_replicated_volumes = PVE::Replication::run_replication(
        'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);

    # extra safety check
    my @replicated_volumes = $self->filter_local_volumes(undef, 1);
    foreach my $volid (@replicated_volumes) {
        die "expected volume '$volid' to get replicated, but it wasn't\n"
            if !$actual_replicated_volumes->{$volid};
    }
}

sub config_update_local_disksizes {
    my ($self) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    PVE::QemuConfig->foreach_volume($conf, sub {
        my ($key, $drive) = @_;
        # skip special disks, will be handled later
        return if $key eq 'efidisk0';
        return if $key eq 'tpmstate0';

        my $volid = $drive->{file};
        return if !defined($local_volumes->{$volid}); # only update sizes for local volumes

        my ($updated, $msg) = PVE::QemuServer::Drive::update_disksize($drive, $local_volumes->{$volid}->{size});
        if (defined($updated)) {
            $conf->{$key} = PVE::QemuServer::print_drive($updated);
            $self->log('info', "drive '$key': $msg");
        }
    });

    # we want to set the efidisk size in the config to the size of the real
    # OVMF_VARS.fd image, otherwise we could create an image that is too big,
    # which does not work
    if (defined($conf->{efidisk0})) {
        PVE::QemuServer::update_efidisk_size($conf);
    }

    # TPM state might have an irregular filesize; to avoid problems on transfer
    # we always assume the static size of 4M to allocate on the target
    if (defined($conf->{tpmstate0})) {
        PVE::QemuServer::update_tpmstate_size($conf);
    }
}

sub filter_local_volumes {
    my ($self, $migration_mode, $replicated) = @_;

    my $volumes = $self->{local_volumes};
    my @filtered_volids;

    foreach my $volid (sort keys %{$volumes}) {
        next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
        next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
        push @filtered_volids, $volid;
    }

    return @filtered_volids;
}
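
# usage examples from this module (an undef filter matches everything):
#   $self->filter_local_volumes('online');     # volumes to mirror via NBD
#   $self->filter_local_volumes('offline', 0); # offline, non-replicated volumes
#   $self->filter_local_volumes(undef, 1);     # replicated volumes, any mode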

sub sync_offline_local_volumes {
    my ($self) = @_;

    my $local_volumes = $self->{local_volumes};
    my @volids = $self->filter_local_volumes('offline', 0);

    my $storecfg = $self->{storecfg};
    my $opts = $self->{opts};

    $self->log('info', "copying local disk images") if scalar(@volids);

    foreach my $volid (@volids) {
        my $new_volid;

        if ($opts->{remote}) {
            my $log = sub {
                my ($level, $msg) = @_;
                $self->log($level, $msg);
            };

            $new_volid = PVE::StorageTunnel::storage_migrate(
                $self->{tunnel},
                $storecfg,
                $volid,
                $self->{vmid},
                $opts->{remote}->{vmid},
                $local_volumes->{$volid},
                $log,
            );
        } else {
            my $targetsid = $local_volumes->{$volid}->{targetsid};

            my $bwlimit = $local_volumes->{$volid}->{bwlimit};
            $bwlimit = $bwlimit * 1024 if defined($bwlimit); # storage_migrate uses bps

            my $storage_migrate_opts = {
                'ratelimit_bps' => $bwlimit,
                'insecure' => $opts->{migration_type} eq 'insecure',
                'with_snapshots' => $local_volumes->{$volid}->{snapshots},
                'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
            };

            my $logfunc = sub { $self->log('info', $_[0]); };
            $new_volid = eval {
                PVE::Storage::storage_migrate(
                    $storecfg,
                    $volid,
                    $self->{ssh_info},
                    $targetsid,
                    $storage_migrate_opts,
                    $logfunc,
                );
            };
            if (my $err = $@) {
                die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
            }
        }

        $self->{volume_map}->{$volid} = $new_volid;
        $self->log('info', "volume '$volid' is '$new_volid' on the target\n");

        eval { PVE::Storage::deactivate_volumes($storecfg, [$volid]); };
        if (my $err = $@) {
            $self->log('warn', $err);
        }
    }
}

sub cleanup_remotedisks {
    my ($self) = @_;

    if ($self->{opts}->{remote}) {
        PVE::Tunnel::finish_tunnel($self->{tunnel}, 1);
        delete $self->{tunnel};
        return;
    }

    my $local_volumes = $self->{local_volumes};

    foreach my $volid (values %{$self->{volume_map}}) {
        # don't clean up replicated disks!
        next if $local_volumes->{$volid}->{replicated};

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub cleanup_bitmaps {
    my ($self) = @_;
    foreach my $drive (keys %{$self->{target_drive}}) {
        my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
        next if !$bitmap;
        $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
        mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
    }
}

sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->scan_local_volumes($vmid);

    # fix disk sizes to match their actual size and write changes,
    # so that the target allocates the correct volumes
    $self->config_update_local_disksizes();
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->handle_replication($vmid);

    $self->sync_offline_local_volumes();
    $self->phase1_remote($vmid) if $self->{opts}->{remote};
}

sub map_bridges {
    my ($conf, $map, $scan_only) = @_;

    my $bridges = {};

    foreach my $opt (keys %$conf) {
        next if $opt !~ m/^net\d+$/;

        next if !$conf->{$opt};
        my $d = PVE::QemuServer::parse_net($conf->{$opt});
        next if !$d || !$d->{bridge};

        my $target_bridge = PVE::JSONSchema::map_id($map, $d->{bridge});
        $bridges->{$target_bridge}->{$opt} = $d->{bridge};

        next if $scan_only;

        $d->{bridge} = $target_bridge;
        $conf->{$opt} = PVE::QemuServer::print_net($d);
    }

    return $bridges;
}
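
# illustrative example (hypothetical values): for a config containing
# 'net0: virtio=...,bridge=vmbr0' and a bridgemap translating vmbr0 to vmbr1,
# this returns { vmbr1 => { net0 => 'vmbr0' } }; without $scan_only it also
# rewrites $conf->{net0} to use bridge=vmbr1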

sub phase1_remote {
    my ($self, $vmid) = @_;

    my $remote_conf = PVE::QemuConfig->load_config($vmid);
    PVE::QemuConfig->update_volume_ids($remote_conf, $self->{volume_map});

    my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
    for my $target (keys $bridges->%*) {
        for my $nic (keys $bridges->{$target}->%*) {
            $self->log('info', "mapped: $nic from $bridges->{$target}->{$nic} to $target");
        }
    }

    my @online_local_volumes = $self->filter_local_volumes('online');

    my $storage_map = $self->{opts}->{storagemap};
    $self->{nbd} = {};
    PVE::QemuConfig->foreach_volume($remote_conf, sub {
        my ($ds, $drive) = @_;

        # TODO eject CDROM?
        return if PVE::QemuServer::drive_is_cdrom($drive);

        my $volid = $drive->{file};
        return if !$volid;

        return if !grep { $_ eq $volid } @online_local_volumes;

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
        my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
        my $source_format = PVE::QemuServer::qemu_img_format($scfg, $volname);

        # set by target cluster
        my $oldvolid = delete $drive->{file};
        delete $drive->{format};

        my $targetsid = PVE::JSONSchema::map_id($storage_map, $storeid);

        my $params = {
            format => $source_format,
            storage => $targetsid,
            drive => $drive,
        };

        $self->log('info', "Allocating volume for drive '$ds' on remote storage '$targetsid'..");
        my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'disk', $params);

        $self->log('info', "volume '$oldvolid' is '$res->{volid}' on the target\n");
        $remote_conf->{$ds} = $res->{drivestr};
        $self->{nbd}->{$ds} = $res;
    });

    my $conf_str = PVE::QemuServer::write_vm_config("remote", $remote_conf);

    # TODO expose in PVE::Firewall?
    my $vm_fw_conf_path = "/etc/pve/firewall/$vmid.fw";
    my $fw_conf_str;
    $fw_conf_str = PVE::Tools::file_get_contents($vm_fw_conf_path)
        if -e $vm_fw_conf_path;
    my $params = {
        conf => $conf_str,
        'firewall-config' => $fw_conf_str,
    };

    PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'config', $params);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_remotedisks() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }
}

sub phase2_start_local_cluster {
    my ($self, $vmid, $params) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};
    my @online_local_volumes = $self->filter_local_volumes('online');

    my $start = $params->{start_params};
    my $migrate = $params->{migrate_opts};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $tunnel_info = {};

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    push @$cmd, 'qm', 'start', $vmid;

    if ($start->{skiplock}) {
        push @$cmd, '--skiplock';
    }

    push @$cmd, '--migratedfrom', $migrate->{migratedfrom};

    push @$cmd, '--migration_type', $migrate->{type};

    push @$cmd, '--migration_network', $migrate->{network}
        if $migrate->{network};

    push @$cmd, '--stateuri', $start->{statefile};

    if ($start->{forcemachine}) {
        push @$cmd, '--machine', $start->{forcemachine};
    }

    if ($start->{forcecpu}) {
        push @$cmd, '--force-cpu', $start->{forcecpu};
    }

    if ($self->{storage_migration}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;
    my $input = "nbd_protocol_version: $migrate->{nbd_proto_version}\n";

    my @offline_local_volumes = $self->filter_local_volumes('offline');
    for my $volid (@offline_local_volumes) {
        my $drivename = $local_volumes->{$volid}->{drivename};
        next if !$drivename || !$conf->{$drivename};

        my $new_volid = $self->{volume_map}->{$volid};
        next if !$new_volid || $volid eq $new_volid;

        # FIXME PVE 8.x only use offline_volume variant once all targets can handle it
        if ($drivename eq 'tpmstate0') {
            $input .= "$drivename: $new_volid\n"
        } else {
            $input .= "offline_volume: $drivename: $new_volid\n"
        }
    }

    $input .= "spice_ticket: $migrate->{spice_ticket}\n" if $migrate->{spice_ticket};

    my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
    foreach my $volid (@online_replicated_volumes) {
        $input .= "replicated_volume: $volid\n";
    }

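    # at this point a fully assembled $input might look like this
    # (illustrative values only):
    #   nbd_protocol_version: 1
    #   offline_volume: scsi1: local-lvm:vm-100-disk-1
    #   spice_ticket: <ticket>
    #   replicated_volume: local-zfs:vm-100-disk-0
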
    my $handle_storage_migration_listens = sub {
        my ($drive_key, $drivestr, $nbd_uri) = @_;

        $self->{stopnbd} = 1;
        $self->{target_drive}->{$drive_key}->{drivestr} = $drivestr;
        $self->{target_drive}->{$drive_key}->{nbd_uri} = $nbd_uri;

        my $source_drive = PVE::QemuServer::parse_drive($drive_key, $conf->{$drive_key});
        my $target_drive = PVE::QemuServer::parse_drive($drive_key, $drivestr);
        my $source_volid = $source_drive->{file};
        my $target_volid = $target_drive->{file};

        $self->{volume_map}->{$source_volid} = $target_volid;
        $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
    };

    my $target_replicated_volumes = {};

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
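    #
    # lines we expect on the remote 'qm start' stdout and parse below, e.g.
    # (illustrative values):
    #   migration listens on unix:/run/qemu-server/100.migrate
    #   spice listens on port 61001
    #   storage migration listens on nbd:unix:/run/qemu-server/100_nbd.migrate:exportname=drive-scsi0 volume:local-lvm:vm-100-disk-0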
    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on (tcp):(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $tunnel_info->{addr} = $2;
            $tunnel_info->{port} = int($3);
            $tunnel_info->{proto} = $1;
        }
        elsif ($line =~ m!^migration listens on (unix):(/run/qemu-server/(\d+)\.migrate)$!) {
            $tunnel_info->{addr} = $2;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $3;
            $tunnel_info->{proto} = $1;
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $tunnel_info->{addr} = "localhost";
            $tunnel_info->{port} = int($1);
            $tunnel_info->{proto} = "tcp";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
        } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
            my $drivestr = $4;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            my $nbd_unix_addr = $1;
            my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
            $tunnel_info->{unix_sockets}->{$nbd_unix_addr} = 1;
        } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
            my $drive = $1;
            my $volid = $2;
            $target_replicated_volumes->{$volid} = $drive;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$tunnel_info->{addr} || !$tunnel_info->{proto};

    if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
        die "number of replicated disks on source and target node do not match - target node too old?\n"
    }

    return ($tunnel_info, $spice_port);
}

sub phase2_start_remote_cluster {
    my ($self, $vmid, $params) = @_;

    die "insecure migration to remote cluster not implemented\n"
        if $params->{migrate_opts}->{type} ne 'websocket';

    my $remote_vmid = $self->{opts}->{remote}->{vmid};

    # like regular start but with some overhead accounted for
    my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}) + 10;

    my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params);

    foreach my $drive (keys %{$res->{drives}}) {
        $self->{stopnbd} = 1;
        $self->{target_drive}->{$drive}->{drivestr} = $res->{drives}->{$drive}->{drivestr};
        my $nbd_uri = $res->{drives}->{$drive}->{nbd_uri};
        die "unexpected NBD uri for '$drive': $nbd_uri\n"
            if $nbd_uri !~ s!/run/qemu-server/$remote_vmid\_!/run/qemu-server/$vmid\_!;

        $self->{target_drive}->{$drive}->{nbd_uri} = $nbd_uri;
    }

    return ($res->{migrate}, $res->{spice_port});
}

sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    # version > 0 for unix socket support
    my $nbd_protocol_version = 1;

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    my $migration_type = $self->{opts}->{migration_type};
    my $state_uri = $migration_type eq 'insecure' ? 'tcp' : 'unix';

    my $params = {
        start_params => {
            statefile => $state_uri,
            forcemachine => $self->{forcemachine},
            forcecpu => $self->{forcecpu},
            skiplock => 1,
        },
        migrate_opts => {
            spice_ticket => $spice_ticket,
            type => $migration_type,
            network => $self->{opts}->{migration_network},
            storagemap => $self->{opts}->{storagemap},
            migratedfrom => PVE::INotify::nodename(),
            nbd_proto_version => $nbd_protocol_version,
            nbd => $self->{nbd},
        },
    };

    my ($tunnel_info, $spice_port);

    my @online_local_volumes = $self->filter_local_volumes('online');
    $self->{storage_migration} = 1 if scalar(@online_local_volumes);

    if (my $remote = $self->{opts}->{remote}) {
        my $remote_vmid = $remote->{vmid};
        $params->{migrate_opts}->{remote_node} = $self->{node};
        ($tunnel_info, $spice_port) = $self->phase2_start_remote_cluster($vmid, $params);
        die "only UNIX sockets are supported for remote migration\n"
            if $tunnel_info->{proto} ne 'unix';

        my $remote_socket = $tunnel_info->{addr};
        my $local_socket = $remote_socket;
        $local_socket =~ s/$remote_vmid/$vmid/g;
        $tunnel_info->{addr} = $local_socket;

        $self->log('info', "Setting up tunnel for '$local_socket'");
        PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);

        foreach my $remote_socket (@{$tunnel_info->{unix_sockets}}) {
            my $local_socket = $remote_socket;
            $local_socket =~ s/$remote_vmid/$vmid/g;
            next if $self->{tunnel}->{forwarded}->{$local_socket};
            $self->log('info', "Setting up tunnel for '$local_socket'");
            PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);
        }
    } else {
        ($tunnel_info, $spice_port) = $self->phase2_start_local_cluster($vmid, $params);

        $self->log('info', "start remote tunnel");
        $self->start_remote_tunnel($tunnel_info);
    }

    my $migrate_uri = "$tunnel_info->{proto}:$tunnel_info->{addr}";
    $migrate_uri .= ":$tunnel_info->{port}"
        if defined($tunnel_info->{port});
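    # e.g. 'unix:/run/qemu-server/100.migrate' (secure) or 'tcp:localhost:60000'
    # (legacy/insecure) - illustrative values, built from what the remote reported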

    if ($self->{storage_migration}) {
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar(@online_local_volumes));
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $source_volid = $source_drive->{file};

            my $bwlimit = $self->{local_volumes}->{$source_volid}->{bwlimit};
            my $bitmap = $target->{bitmap};

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
        }
    }

    $self->log('info', "starting online/live migration on $migrate_uri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration capabilities");
    eval { PVE::QemuServer::set_migration_caps($vmid) };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = $self->get_bwlimit();

    my $migrate_speed = $conf->{migrate_speed} // 0;
    $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s

    if ($bwlimit && $migrate_speed) {
        $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
    } else {
        $migrate_speed ||= $bwlimit;
    }
    $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;

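    # e.g. (illustrative): a datacenter bwlimit of 51200 KiB/s and a configured
    # migrate_speed of 100 MB/s compare as 51200 vs 102400 KiB/s here, so the
    # lower 51200 KiB/s wins before the conversion to bytes per second below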
    if ($migrate_speed) {
        $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
        $self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");
    } else {
        # always set migrate speed as QEMU default to 128 MiBps == 1 Gbps, use 16 GiBps == 128 Gbps
        $migrate_speed = (16 << 30);
    }
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    # migrate-set-parameters expects limit in ms
    $migrate_downtime *= 1000;
    $self->log('info', "migration downtime limit: $migrate_downtime ms");
    $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

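    # e.g. with 4096 MiB of guest memory: int(4096 * 1048576 / 10) is roughly
    # 410 MiB, which round_powerof2() rounds up to 512 MiB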
    $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;

    }

    my $start = time();

    $self->log('info', "start migrate command to $migrate_uri");
    eval {
        mon_cmd($vmid, "migrate", uri => $migrate_uri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;

    my $last_mem_transferred = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;

        usleep($usleep);

        my $stat = eval { mon_cmd($vmid, "query-migrate") };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1_000_000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        my $status = $stat->{status};
        if (defined($status) && $status =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
            die $merr if $merr;
            die "unable to parse migration status '$status' - aborting\n";
        }
        $merr = undef;
        $err_count = 0;

        my $memstat = $stat->{ram};

        if ($status eq 'completed') {
            my $delay = time() - $start;
            if ($delay > 0) {
                my $total = $memstat->{total} || 0;
                my $avg_speed = render_bytes($total / $delay, 1);
                my $downtime = $stat->{downtime} || 0;
                $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
            }
        }

        if ($status eq 'failed' || $status eq 'cancelled') {
            my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
            $self->log('info', "migration status error: $message");
            die "aborting\n"
        }

        if ($status ne 'active') {
            $self->log('info', "migration status: $status");
            last;
        }

        if ($memstat->{transferred} ne $last_mem_transferred) {
            my $trans = $memstat->{transferred} || 0;
            my $rem = $memstat->{remaining} || 0;
            my $total = $memstat->{total} || 0;
            my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
            my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);

            # reduce sleep if remaining memory is lower than the average transfer speed
            $usleep = 100_000 if $avglstat && $rem < $avglstat;

            # also reduce logging if we poll more frequently
            my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;

            my $total_h = render_bytes($total, 1);
            my $transferred_h = render_bytes($trans, 1);
            my $speed_h = render_bytes($speed, 1);

            my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";

            if ($dirty_rate > $speed) {
                my $dirty_rate_h = render_bytes($dirty_rate, 1);
                $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
            }

            $self->log('info', "migration $status, $progress") if $should_log;

            my $xbzrle = $stat->{"xbzrle-cache"} || {};
            my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
            if ($xbzrlebytes || $xbzrlepages) {
                my $bytes_h = render_bytes($xbzrlebytes, 1);

                my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";

                $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
                    if $xbzrle->{'cache-miss-rate'};

                $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};

                $self->log('info', "xbzrle: $msg") if $should_log;
            }

            if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                $downtimecounter++;
            }
            $lastrem = $rem;

            if ($downtimecounter > 5) {
                $downtimecounter = 0;
                $migrate_downtime *= 2;
                $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
                eval {
                    # migrate-set-parameters does not touch values not
                    # specified, so this only changes downtime-limit
                    mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
                };
                $self->log('info', "migrate-set-parameters error: $@") if $@;
            }
        }

        $last_mem_transferred = $memstat->{transferred};
    }

    if ($self->{storage_migration}) {
        # finish block-job with block-job-cancel, to disconnect source VM from NBD
        # to avoid it trying to re-establish it. We are in blockjob ready state,
        # thus this command changes it to blockjob complete (see qapi docs)
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
        if (my $err = $@) {
            die "Failed to complete storage migration: $err\n";
        }
    }
}

sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $vm_status = eval {
        mon_cmd($vmid, 'query-status')->{status} or die "no 'status' in result\n";
    };
    $self->log('err', "query-status error: $@") if $@;

    # Can end up in POSTMIGRATE state if failure occurred after convergence. Try going back to
    # original state. Unfortunately, direct transition from POSTMIGRATE to PAUSED is not possible.
    if ($vm_status && $vm_status eq 'postmigrate') {
        if (!$self->{vm_was_paused}) {
            eval { mon_cmd($vmid, 'cont'); };
            $self->log('err', "resuming VM failed: $@") if $@;
        } else {
            $self->log('err', "VM was paused, but ended in postmigrate state");
        }
    }

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    my $nodename = PVE::INotify::nodename();

    if ($self->{tunnel} && $self->{tunnel}->{version} >= 2) {
        PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'stop');
    } else {
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # cleanup after stopping, otherwise disks might be in-use by target VM!
    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{tunnel}) {
        eval { PVE::Tunnel::finish_tunnel($self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub phase3 {
    my ($self, $vmid) = @_;

    return;
}

sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{volume_map} && !$self->{opts}->{remote}) {
        my $target_drives = $self->{target_drive};

        # FIXME: for NBD storage migration we now only update the volid, and
        # not the full drivestr from the target node. Workaround that until we
        # got some real rescan, to avoid things like wrong format in the drive
        delete $conf->{$_} for keys %$target_drives;
        PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});

        for my $drive (keys %$target_drives) {
            $conf->{$drive} = $target_drives->{$drive}->{drivestr};
        }
        PVE::QemuConfig->write_config($vmid, $conf);
    }

    # transfer replication state before moving the config
    if (!$self->{opts}->{remote}) {
        $self->transfer_replication_state() if $self->{is_replicated};
        PVE::QemuConfig->move_config_to_node($vmid, $self->{node});
        $self->switch_replication_job_target() if $self->{is_replicated};
    }

    if ($self->{livemigration}) {
        if ($self->{stopnbd}) {
            $self->log('info', "stopping NBD storage migration server on target.");
            # stop nbd server on remote vm - requirement for resume since 2.9
            if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 2) {
                PVE::Tunnel::write_tunnel($tunnel, 30, 'nbdstop');
            } else {
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            }
        }

        # deletes local FDB entries if learning is disabled, they'll be re-added on target on resume
        PVE::QemuServer::del_nets_bridge_fdb($conf, $vmid);

        if (!$self->{vm_was_paused}) {
            # config moved and nbd server stopped - now we can resume vm on target
            if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
                my $cmd = $tunnel->{version} == 1 ? "resume $vmid" : "resume";
                eval {
                    PVE::Tunnel::write_tunnel($tunnel, 30, $cmd);
                };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            } else {
                # nocheck in case target node hasn't processed the config move/rename yet
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
                my $logf = sub {
                    my $line = shift;
                    $self->log('err', $line);
                };
                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            }
        }

        if (
            $self->{storage_migration}
            && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}
            && $self->{running}
        ) {
            if (!$self->{vm_was_paused}) {
                $self->log('info', "issuing guest fstrim");
                if ($self->{opts}->{remote}) {
                    PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'fstrim');
                } else {
                    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
                    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
                    if (my $err = $@) {
                        $self->log('err', "fstrim failed - $err");
                        $self->{errors} = 1;
                    }
                }
            } else {
                $self->log('info', "skipping guest fstrim, because VM is paused");
            }
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel && $tunnel->{version} == 1) {
        eval { PVE::Tunnel::finish_tunnel($tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
        $tunnel = undef;
        delete $self->{tunnel};
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM with nocheck, since config is moved already
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);

    # destroy local copies
    foreach my $volid (@not_replicated_volumes) {
        # remote is cleaned up below
        next if $self->{opts}->{remote};

        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }

    # clear migrate lock
    if ($tunnel && $tunnel->{version} >= 2) {
        PVE::Tunnel::write_tunnel($tunnel, 10, "unlock");

        PVE::Tunnel::finish_tunnel($tunnel);
    } else {
        my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
        $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
    }

    if ($self->{opts}->{remote} && $self->{opts}->{delete}) {
        eval { PVE::QemuServer::destroy_vm($self->{storecfg}, $vmid, 1, undef, 0) };
        warn "Failed to remove source VM - $@\n" if $@;
    }
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0]-1)/log(2));
}
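
# illustrative worked examples: round_powerof2(5) == 8 (2 << int(log2(4)) == 2 << 2),
# round_powerof2(8) == 8, round_powerof2(9) == 16, and inputs below 2 return 1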

1;