package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use Time::HiRes qw( usleep );

use PVE::Cluster;
use PVE::Format qw(render_bytes);
use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Replication;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Storage;
use PVE::StorageTunnel;
use PVE::Tools;
use PVE::Tunnel;

use PVE::QemuConfig;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer;

use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

# compared against remote end's minimum version
our $WS_TUNNEL_VERSION = 2;
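# Illustrative compatibility check with hypothetical values: if the remote end
# reports version 3 with age 1, its supported minimum is 3 - 1 = 2, so a local
# $WS_TUNNEL_VERSION of 2 is still compatible (see the checks in prepare()).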

sub fork_tunnel {
    my ($self, $ssh_forward_info) = @_;

    my $cmd = ['/usr/sbin/qm', 'mtunnel'];
    my $log = sub {
        my ($level, $msg) = @_;
        $self->log($level, $msg);
    };

    return PVE::Tunnel::fork_ssh_tunnel($self->{rem_ssh}, $cmd, $ssh_forward_info, $log);
}

sub fork_websocket_tunnel {
    my ($self, $storages, $bridges) = @_;

    my $remote = $self->{opts}->{remote};
    my $conn = $remote->{conn};

    my $log = sub {
        my ($level, $msg) = @_;
        $self->log($level, $msg);
    };

    my $websocket_url = "https://$conn->{host}:$conn->{port}/api2/json/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnelwebsocket";
    my $url = "/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnel";

    my $tunnel_params = {
        url => $websocket_url,
    };

    my $storage_list = join(',', keys %$storages);
    my $bridge_list = join(',', keys %$bridges);

    my $req_params = {
        storages => $storage_list,
        bridges => $bridge_list,
    };

    return PVE::Tunnel::fork_websocket_tunnel($conn, $url, $req_params, $tunnel_params, $log);
}
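# For example (hypothetical host and VMID), a remote connection to pve2:8006
# targeting VMID 100 on node 'pve2' yields the two URLs built above:
#   websocket: https://pve2:8006/api2/json/nodes/pve2/qemu/100/mtunnelwebsocket
#   API path:  /nodes/pve2/qemu/100/mtunnel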

# tunnel_info:
# proto: unix (secure) or tcp (insecure/legacy compat)
# addr: IP or UNIX socket path
# port: optional TCP port
# unix_sockets: additional UNIX socket paths to forward
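#
# A typical secure-migration result might look like this (illustrative values,
# matching the formats parsed in phase2_start_local_cluster):
#   my $tunnel_info = {
#       proto => 'unix',
#       addr => '/run/qemu-server/100.migrate',
#       unix_sockets => { '/run/qemu-server/100_nbd.migrate' => 1 },
#   };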
sub start_remote_tunnel {
    my ($self, $tunnel_info) = @_;

    my $nodename = PVE::INotify::nodename();
    my $migration_type = $self->{opts}->{migration_type};

    if ($migration_type eq 'secure') {

        if ($tunnel_info->{proto} eq 'unix') {
            my $ssh_forward_info = [];

            my $unix_sockets = [ keys %{$tunnel_info->{unix_sockets}} ];
            push @$unix_sockets, $tunnel_info->{addr};
            for my $sock (@$unix_sockets) {
                push @$ssh_forward_info, "$sock:$sock";
                unlink $sock;
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

            my $unix_socket_try = 0; # wait for the socket to become ready
            while ($unix_socket_try <= 100) {
                $unix_socket_try++;
                my $available = 0;
                foreach my $sock (@$unix_sockets) {
                    if (-S $sock) {
                        $available++;
                    }
                }

                if ($available == @$unix_sockets) {
                    last;
                }

                usleep(50000);
            }
            if ($unix_socket_try > 100) {
                $self->{errors} = 1;
                PVE::Tunnel::finish_tunnel($self->{tunnel});
                die "timeout - migration socket $tunnel_info->{addr} did not become ready\n";
            }
            $self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);

        } elsif ($tunnel_info->{proto} eq 'tcp') {
            my $ssh_forward_info = [];
            if ($tunnel_info->{addr} eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                push @$ssh_forward_info, "$lport:localhost:$tunnel_info->{port}";
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

        } else {
            die "unsupported protocol in migration URI: $tunnel_info->{proto}\n";
        }
    } else {
        # fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

sub target_storage_check_available {
    my ($self, $storecfg, $targetsid, $volid) = @_;

    if (!$self->{opts}->{remote}) {
        # check if storage is available on target node
        my $target_scfg = PVE::Storage::storage_check_enabled(
            $storecfg,
            $targetsid,
            $self->{node},
        );
        my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
        die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
            if !$target_scfg->{content}->{$vtype};
    }
}
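# Usage sketch with hypothetical IDs - dies unless 'local-lvm' is enabled on the
# target node and allows the volume's content type:
#   $self->target_storage_check_available($storecfg, 'local-lvm', 'local-lvm:vm-100-disk-0');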

sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    my $storecfg = $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    my $version = PVE::QemuServer::Helpers::get_node_pvecfg_version($self->{node});
    my $cloudinit_config = $conf->{cloudinit};

    if (
        PVE::QemuConfig->has_cloudinit($conf) && defined($cloudinit_config)
        && scalar(keys %$cloudinit_config) > 0
        && !PVE::QemuServer::Helpers::pvecfg_min_version($version, 7, 2, 13)
    ) {
        die "target node is too old (manager <= 7.2-13) and doesn't support new cloudinit section\n";
    }

    my $repl_conf = PVE::ReplicationConfig->new();
    $self->{replication_jobcfg} = $repl_conf->find_local_replication_job($vmid, $self->{node});
    $self->{is_replicated} = $repl_conf->check_for_existing_jobs($vmid, 1);

    if ($self->{replication_jobcfg} && defined($self->{replication_jobcfg}->{remove_job})) {
        die "refusing to migrate replicated VM whose replication job is marked for removal\n";
    }

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        if ($self->{is_replicated} && !$self->{replication_jobcfg}) {
            if ($self->{opts}->{force}) {
                $self->log('warn', "WARNING: Node '$self->{node}' is not a replication target. Existing "
                    . "replication jobs will fail after migration!\n");
            } else {
                die "Cannot live-migrate replicated VM to node '$self->{node}' - not a replication "
                    . "target. Use 'force' to override.\n";
            }
        }

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);

        # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
        # Since the parameter itself contains no reference to a custom model,
        # this makes migration independent of changes to "cpu-models.conf".
        if ($conf->{cpu}) {
            my $cpuconf = PVE::JSONSchema::parse_property_string('pve-cpu-conf', $conf->{cpu});
            if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
                $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
            }
        }

        $self->{vm_was_paused} = 1 if PVE::QemuServer::vm_is_paused($vmid);
    }

    my ($loc_res, $mapped_res, $missing_mappings_by_node) = PVE::QemuServer::check_local_resources($conf, 1);
    my $blocking_resources = [];
    for my $res ($loc_res->@*) {
        if (!grep { $_ eq $res } $mapped_res->@*) {
            push $blocking_resources->@*, $res;
        }
    }
    if (scalar($blocking_resources->@*)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", $blocking_resources->@*) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    if (scalar($mapped_res->@*)) {
        my $missing_mappings = $missing_mappings_by_node->{$self->{node}};
        if ($running) {
            die "can't migrate running VM which uses mapped devices: " . join(", ", $mapped_res->@*) . "\n";
        } elsif (scalar($missing_mappings->@*)) {
            die "can't migrate to '$self->{node}': missing mapped devices " . join(", ", $missing_mappings->@*) . "\n";
        } else {
            $self->log('info', "migrating VM which uses mapped local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $storages = {};
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on source node
        my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

        my $targetsid = $sid;
        # NOTE: local ignores shared mappings, remote maps them
        if (!$scfg->{shared} || $self->{opts}->{remote}) {
            $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
        }

        $storages->{$targetsid} = 1;

        $self->target_storage_check_available($storecfg, $targetsid, $volid);

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        }
    }

    if ($self->{opts}->{remote}) {
        # test & establish websocket connection
        my $bridges = map_bridges($conf, $self->{opts}->{bridgemap}, 1);
        my $tunnel = $self->fork_websocket_tunnel($storages, $bridges);
        my $min_version = $tunnel->{version} - $tunnel->{age};
        $self->log('info', "local WS tunnel version: $WS_TUNNEL_VERSION");
        $self->log('info', "remote WS tunnel version: $tunnel->{version}");
        $self->log('info', "minimum required WS tunnel version: $min_version");
        die "Remote tunnel endpoint not compatible, upgrade required\n"
            if $WS_TUNNEL_VERSION < $min_version;
        die "Remote tunnel endpoint too old, upgrade required\n"
            if $WS_TUNNEL_VERSION > $tunnel->{version};

        print "websocket tunnel started\n";
        $self->{tunnel} = $tunnel;
    } else {
        # test ssh connection
        my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
        eval { $self->cmd_quiet($cmd); };
        die "Can't connect to destination address using public key\n" if $@;
    }

    return $running;
}

sub scan_local_volumes {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    # and their old_id => new_id pairs
    $self->{volume_map} = {};
    $self->{local_volumes} = {};

    my $storecfg = $self->{storecfg};
    eval {

        # found local volumes and their origin
        my $local_volumes = $self->{local_volumes};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
            : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
        foreach my $volid (keys %{$replicatable_volumes}) {
            $local_volumes->{$volid}->{replicated} = 1;
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{is_attached}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

            my $targetsid = $sid;
            # NOTE: local ignores shared mappings, remote maps them
            if (!$scfg->{shared} || $self->{opts}->{remote}) {
                $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
            }

            $self->target_storage_check_available($storecfg, $targetsid, $volid);
            return if $scfg->{shared} && !$self->{opts}->{remote};

            $local_volumes->{$volid}->{ref} = 'pending' if $attr->{referenced_in_pending};
            $local_volumes->{$volid}->{ref} = 'snapshot' if $attr->{referenced_in_snapshot};
            $local_volumes->{$volid}->{ref} = 'unused' if $attr->{is_unused};
            $local_volumes->{$volid}->{ref} = 'attached' if $attr->{is_attached};
            $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};

            $local_volumes->{$volid}->{bwlimit} = $self->get_bwlimit($sid, $targetsid);
            $local_volumes->{$volid}->{targetsid} = $targetsid;

            $local_volumes->{$volid}->@{qw(size format)} = PVE::Storage::volume_size_info($storecfg, $volid);

            $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;

            $local_volumes->{$volid}->{drivename} = $attr->{drivename}
                if $attr->{drivename};

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            # If with_snapshots is not set for storage_migrate, it tries to use
            # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
            # back to qcow2 is currently not possible.
            $local_volumes->{$volid}->{snapshots} = ($local_volumes->{$volid}->{format} =~ /^(?:qcow2|vmdk)$/);

            my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $vmid);

            return if $attr->{is_vmstate};

            if (defined($snaprefs)) {
                $local_volumes->{$volid}->{snapshots} = 1;

                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if non-replicated snapshot exists\n"
                    if $self->{running} && !$local_volumes->{$volid}->{replicated};

                die "remote migration with snapshots not supported yet\n" if $self->{opts}->{remote};

                if (!($scfg->{type} eq 'zfspool'
                    || ($scfg->{type} eq 'btrfs' && $local_volumes->{$volid}->{format} eq 'raw')
                    || $local_volumes->{$volid}->{format} eq 'qcow2'
                )) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'attached') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found $type disk '$vol' (attached)\n");
            } elsif ($ref eq 'unused') {
                $self->log('info', "found $type disk '$vol' (unused)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'pending') {
                $self->log('info', "found $type disk '$vol' (pending change)\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found $type disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($storecfg, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|btrfs|zfspool|lvmthin|lvm)$/;

            # TODO: what is this even here for?
            $migratable = 1 if $self->{opts}->{remote};

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'\n";
            }
        }

        foreach my $volid (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'attached') {
                $local_volumes->{$volid}->{migration_mode} = 'online';
            } elsif ($self->{running} && $ref eq 'generated') {
                # offline migrate the cloud-init ISO and don't regenerate on VM start
                #
                # tpmstate will also be offline migrated first, and in case of
                # live migration then updated by QEMU/swtpm if necessary
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            } else {
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            }
        }
    };
    die "Problem found while scanning volumes - $@" if $@;
}

sub handle_replication {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    return if !$self->{replication_jobcfg};

    die "can't migrate VM with replicated volumes to remote cluster/node\n"
        if $self->{opts}->{remote};

    if ($self->{running}) {

        my $version = PVE::QemuServer::kvm_user_version();
        if (!min_version($version, 4, 2)) {
            die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n";
        }

        my @live_replicatable_volumes = $self->filter_local_volumes('online', 1);
        foreach my $volid (@live_replicatable_volumes) {
            my $drive = $local_volumes->{$volid}->{drivename};
            die "internal error - no drive for '$volid'\n" if !defined($drive);

            my $bitmap = "repl_$drive";

            # start tracking before replication to get full delta + a few duplicates
            $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
            mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);

            # other info comes from target node in phase 2
            $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
        }
    }
    $self->log('info', "replicating disk images");

    my $start_time = time();
    my $logfunc = sub { $self->log('info', shift) };
    my $actual_replicated_volumes = PVE::Replication::run_replication(
        'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);

    # extra safety check
    my @replicated_volumes = $self->filter_local_volumes(undef, 1);
    foreach my $volid (@replicated_volumes) {
        die "expected volume '$volid' to get replicated, but it wasn't\n"
            if !$actual_replicated_volumes->{$volid};
    }
}

sub config_update_local_disksizes {
    my ($self) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    PVE::QemuConfig->foreach_volume($conf, sub {
        my ($key, $drive) = @_;
        # skip special disks, will be handled later
        return if $key eq 'efidisk0';
        return if $key eq 'tpmstate0';

        my $volid = $drive->{file};
        return if !defined($local_volumes->{$volid}); # only update sizes for local volumes

        my ($updated, $msg) = PVE::QemuServer::Drive::update_disksize($drive, $local_volumes->{$volid}->{size});
        if (defined($updated)) {
            $conf->{$key} = PVE::QemuServer::print_drive($updated);
            $self->log('info', "drive '$key': $msg");
        }
    });

    # we want to set the efidisk size in the config to the size of the real
    # OVMF_VARS.fd image, otherwise we may create an overly large image, which does not work
    if (defined($conf->{efidisk0})) {
        PVE::QemuServer::update_efidisk_size($conf);
    }

    # TPM state might have an irregular filesize; to avoid problems on transfer
    # we always assume the static size of 4M to allocate on the target
    if (defined($conf->{tpmstate0})) {
        PVE::QemuServer::update_tpmstate_size($conf);
    }
}

sub filter_local_volumes {
    my ($self, $migration_mode, $replicated) = @_;

    my $volumes = $self->{local_volumes};
    my @filtered_volids;

    foreach my $volid (sort keys %{$volumes}) {
        next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
        next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
        push @filtered_volids, $volid;
    }

    return @filtered_volids;
}
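# Both filters are optional; undef means "don't care". Illustrative calls, as
# used elsewhere in this module:
#   my @offline = $self->filter_local_volumes('offline');        # any replication state
#   my @online_repl = $self->filter_local_volumes('online', 1);  # online and replicated
#   my @not_repl = $self->filter_local_volumes(undef, 0);        # any mode, not replicated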

sub sync_offline_local_volumes {
    my ($self) = @_;

    my $local_volumes = $self->{local_volumes};
    my @volids = $self->filter_local_volumes('offline', 0);

    my $storecfg = $self->{storecfg};
    my $opts = $self->{opts};

    $self->log('info', "copying local disk images") if scalar(@volids);

    foreach my $volid (@volids) {
        my $new_volid;

        if ($opts->{remote}) {
            my $log = sub {
                my ($level, $msg) = @_;
                $self->log($level, $msg);
            };

            $new_volid = PVE::StorageTunnel::storage_migrate(
                $self->{tunnel},
                $storecfg,
                $volid,
                $self->{vmid},
                $opts->{remote}->{vmid},
                $local_volumes->{$volid},
                $log,
            );
        } else {
            my $targetsid = $local_volumes->{$volid}->{targetsid};

            my $bwlimit = $local_volumes->{$volid}->{bwlimit};
            $bwlimit = $bwlimit * 1024 if defined($bwlimit); # storage_migrate uses bps

            my $storage_migrate_opts = {
                'ratelimit_bps' => $bwlimit,
                'insecure' => $opts->{migration_type} eq 'insecure',
                'with_snapshots' => $local_volumes->{$volid}->{snapshots},
                'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
            };

            my $logfunc = sub { $self->log('info', $_[0]); };
            $new_volid = eval {
                PVE::Storage::storage_migrate(
                    $storecfg,
                    $volid,
                    $self->{ssh_info},
                    $targetsid,
                    $storage_migrate_opts,
                    $logfunc,
                );
            };
            if (my $err = $@) {
                die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
            }
        }

        $self->{volume_map}->{$volid} = $new_volid;
        $self->log('info', "volume '$volid' is '$new_volid' on the target\n");

        eval { PVE::Storage::deactivate_volumes($storecfg, [$volid]); };
        if (my $err = $@) {
            $self->log('warn', $err);
        }
    }
}

sub cleanup_remotedisks {
    my ($self) = @_;

    if ($self->{opts}->{remote}) {
        PVE::Tunnel::finish_tunnel($self->{tunnel}, 1);
        delete $self->{tunnel};
        return;
    }

    my $local_volumes = $self->{local_volumes};

    foreach my $volid (values %{$self->{volume_map}}) {
        # don't clean up replicated disks!
        next if $local_volumes->{$volid}->{replicated};

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub cleanup_bitmaps {
    my ($self) = @_;
    foreach my $drive (keys %{$self->{target_drive}}) {
        my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
        next if !$bitmap;
        $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
        mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
    }
}

sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->scan_local_volumes($vmid);

    # fix disk sizes to match their actual size and write changes,
    # so that the target allocates the correct volumes
    $self->config_update_local_disksizes();
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->handle_replication($vmid);

    $self->sync_offline_local_volumes();
    $self->phase1_remote($vmid) if $self->{opts}->{remote};
}

sub map_bridges {
    my ($conf, $map, $scan_only) = @_;

    my $bridges = {};

    foreach my $opt (keys %$conf) {
        next if $opt !~ m/^net\d+$/;

        next if !$conf->{$opt};
        my $d = PVE::QemuServer::parse_net($conf->{$opt});
        next if !$d || !$d->{bridge};

        my $target_bridge = PVE::JSONSchema::map_id($map, $d->{bridge});
        $bridges->{$target_bridge}->{$opt} = $d->{bridge};

        next if $scan_only;

        $d->{bridge} = $target_bridge;
        $conf->{$opt} = PVE::QemuServer::print_net($d);
    }

    return $bridges;
}
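# Sketch of the returned structure, with a hypothetical bridgemap 'vmbr0' =>
# 'vmbr1': for a config with net0 and net1 on vmbr0, map_bridges returns
#   { vmbr1 => { net0 => 'vmbr0', net1 => 'vmbr0' } }
# and, unless $scan_only is set, rewrites both NICs in $conf to use vmbr1.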

sub phase1_remote {
    my ($self, $vmid) = @_;

    my $remote_conf = PVE::QemuConfig->load_config($vmid);
    PVE::QemuConfig->update_volume_ids($remote_conf, $self->{volume_map});

    my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
    for my $target (keys $bridges->%*) {
        for my $nic (keys $bridges->{$target}->%*) {
            $self->log('info', "mapped: $nic from $bridges->{$target}->{$nic} to $target");
        }
    }

    my @online_local_volumes = $self->filter_local_volumes('online');

    my $storage_map = $self->{opts}->{storagemap};
    $self->{nbd} = {};
    PVE::QemuConfig->foreach_volume($remote_conf, sub {
        my ($ds, $drive) = @_;

        # TODO eject CDROM?
        return if PVE::QemuServer::drive_is_cdrom($drive);

        my $volid = $drive->{file};
        return if !$volid;

        return if !grep { $_ eq $volid } @online_local_volumes;

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
        my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
        my $source_format = PVE::QemuServer::qemu_img_format($scfg, $volname);

        # set by target cluster
        my $oldvolid = delete $drive->{file};
        delete $drive->{format};

        my $targetsid = PVE::JSONSchema::map_id($storage_map, $storeid);

        my $params = {
            format => $source_format,
            storage => $targetsid,
            drive => $drive,
        };

        $self->log('info', "Allocating volume for drive '$ds' on remote storage '$targetsid'..");
        my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'disk', $params);

        $self->log('info', "volume '$oldvolid' is '$res->{volid}' on the target\n");
        $remote_conf->{$ds} = $res->{drivestr};
        $self->{nbd}->{$ds} = $res;
    });

    my $conf_str = PVE::QemuServer::write_vm_config("remote", $remote_conf);

    # TODO expose in PVE::Firewall?
    my $vm_fw_conf_path = "/etc/pve/firewall/$vmid.fw";
    my $fw_conf_str;
    $fw_conf_str = PVE::Tools::file_get_contents($vm_fw_conf_path)
        if -e $vm_fw_conf_path;
    my $params = {
        conf => $conf_str,
        'firewall-config' => $fw_conf_str,
    };

    PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'config', $params);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_remotedisks() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }
}

sub phase2_start_local_cluster {
    my ($self, $vmid, $params) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};
    my @online_local_volumes = $self->filter_local_volumes('online');

    my $start = $params->{start_params};
    my $migrate = $params->{migrate_opts};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $tunnel_info = {};

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    push @$cmd, 'qm', 'start', $vmid;

    if ($start->{skiplock}) {
        push @$cmd, '--skiplock';
    }

    push @$cmd, '--migratedfrom', $migrate->{migratedfrom};

    push @$cmd, '--migration_type', $migrate->{type};

    push @$cmd, '--migration_network', $migrate->{network}
        if $migrate->{network};

    push @$cmd, '--stateuri', $start->{statefile};

    if ($start->{forcemachine}) {
        push @$cmd, '--machine', $start->{forcemachine};
    }

    if ($start->{forcecpu}) {
        push @$cmd, '--force-cpu', $start->{forcecpu};
    }

    if ($self->{storage_migration}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;
    my $input = "nbd_protocol_version: $migrate->{nbd_proto_version}\n";

    my @offline_local_volumes = $self->filter_local_volumes('offline');
    for my $volid (@offline_local_volumes) {
        my $drivename = $local_volumes->{$volid}->{drivename};
        next if !$drivename || !$conf->{$drivename};

        my $new_volid = $self->{volume_map}->{$volid};
        next if !$new_volid || $volid eq $new_volid;

        # FIXME PVE 8.x: only use the offline_volume variant once all targets can handle it
        if ($drivename eq 'tpmstate0') {
            $input .= "$drivename: $new_volid\n";
        } else {
            $input .= "offline_volume: $drivename: $new_volid\n";
        }
    }

    $input .= "spice_ticket: $migrate->{spice_ticket}\n" if $migrate->{spice_ticket};

    my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
    foreach my $volid (@online_replicated_volumes) {
        $input .= "replicated_volume: $volid\n";
    }

    my $handle_storage_migration_listens = sub {
        my ($drive_key, $drivestr, $nbd_uri) = @_;

        $self->{stopnbd} = 1;
        $self->{target_drive}->{$drive_key}->{drivestr} = $drivestr;
        $self->{target_drive}->{$drive_key}->{nbd_uri} = $nbd_uri;

        my $source_drive = PVE::QemuServer::parse_drive($drive_key, $conf->{$drive_key});
        my $target_drive = PVE::QemuServer::parse_drive($drive_key, $drivestr);
        my $source_volid = $source_drive->{file};
        my $target_volid = $target_drive->{file};

        $self->{volume_map}->{$source_volid} = $target_volid;
        $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
    };

    my $target_replicated_volumes = {};

    # Note: we try to keep $spice_ticket secret (do not pass it via a command
    # line parameter) - instead we pipe it through STDIN
    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on (tcp):(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $tunnel_info->{addr} = $2;
            $tunnel_info->{port} = int($3);
            $tunnel_info->{proto} = $1;
        } elsif ($line =~ m!^migration listens on (unix):(/run/qemu-server/(\d+)\.migrate)$!) {
            $tunnel_info->{addr} = $2;
            die "Destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $3;
            $tunnel_info->{proto} = $1;
        } elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $tunnel_info->{addr} = "localhost";
            $tunnel_info->{port} = int($1);
            $tunnel_info->{proto} = "tcp";
        } elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        } elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
        } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
            my $drivestr = $4;
            die "Destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            my $nbd_unix_addr = $1;
            my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
            $tunnel_info->{unix_sockets}->{$nbd_unix_addr} = 1;
        } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
            my $drive = $1;
            my $volid = $2;
            $target_replicated_volumes->{$volid} = $drive;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$tunnel_info->{addr} || !$tunnel_info->{proto};

    if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
        die "number of replicated disks on source and target node do not match - target node too old?\n";
    }

    return ($tunnel_info, $spice_port);
}
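# Illustrative remote 'qm start' output lines the parser above matches (paths,
# port numbers and volume IDs are hypothetical; the formats follow the regexes):
#   migration listens on unix:/run/qemu-server/100.migrate
#   spice listens on port 61000
#   storage migration listens on nbd:unix:/run/qemu-server/100_nbd.migrate:exportname=drive-scsi0 volume:local-lvm:vm-100-disk-1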

sub phase2_start_remote_cluster {
    my ($self, $vmid, $params) = @_;

    die "insecure migration to remote cluster not implemented\n"
        if $params->{migrate_opts}->{type} ne 'websocket';

    my $remote_vmid = $self->{opts}->{remote}->{vmid};

    # like regular start but with some overhead accounted for
    my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}) + 10;

    my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params);

    foreach my $drive (keys %{$res->{drives}}) {
        $self->{stopnbd} = 1;
        $self->{target_drive}->{$drive}->{drivestr} = $res->{drives}->{$drive}->{drivestr};
        my $nbd_uri = $res->{drives}->{$drive}->{nbd_uri};
        die "unexpected NBD uri for '$drive': $nbd_uri\n"
            if $nbd_uri !~ s!/run/qemu-server/$remote_vmid\_!/run/qemu-server/$vmid\_!;

        $self->{target_drive}->{$drive}->{nbd_uri} = $nbd_uri;
    }

    return ($res->{migrate}, $res->{spice_port});
}

sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    # version > 0 for unix socket support
    my $nbd_protocol_version = 1;

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    my $migration_type = $self->{opts}->{migration_type};
    my $state_uri = $migration_type eq 'insecure' ? 'tcp' : 'unix';

    my $params = {
        start_params => {
            statefile => $state_uri,
            forcemachine => $self->{forcemachine},
            forcecpu => $self->{forcecpu},
            skiplock => 1,
        },
        migrate_opts => {
            spice_ticket => $spice_ticket,
            type => $migration_type,
            network => $self->{opts}->{migration_network},
            storagemap => $self->{opts}->{storagemap},
            migratedfrom => PVE::INotify::nodename(),
            nbd_proto_version => $nbd_protocol_version,
            nbd => $self->{nbd},
        },
    };

    my ($tunnel_info, $spice_port);

    my @online_local_volumes = $self->filter_local_volumes('online');
    $self->{storage_migration} = 1 if scalar(@online_local_volumes);

    if (my $remote = $self->{opts}->{remote}) {
        my $remote_vmid = $remote->{vmid};
        $params->{migrate_opts}->{remote_node} = $self->{node};
        ($tunnel_info, $spice_port) = $self->phase2_start_remote_cluster($vmid, $params);
        die "only UNIX sockets are supported for remote migration\n"
            if $tunnel_info->{proto} ne 'unix';

        my $remote_socket = $tunnel_info->{addr};
        my $local_socket = $remote_socket;
        $local_socket =~ s/$remote_vmid/$vmid/g;
        $tunnel_info->{addr} = $local_socket;

        $self->log('info', "Setting up tunnel for '$local_socket'");
        PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);

        foreach my $remote_socket (@{$tunnel_info->{unix_sockets}}) {
            my $local_socket = $remote_socket;
            $local_socket =~ s/$remote_vmid/$vmid/g;
            next if $self->{tunnel}->{forwarded}->{$local_socket};
            $self->log('info', "Setting up tunnel for '$local_socket'");
            PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);
        }
    } else {
        ($tunnel_info, $spice_port) = $self->phase2_start_local_cluster($vmid, $params);

        $self->log('info', "start remote tunnel");
        $self->start_remote_tunnel($tunnel_info);
    }

    my $migrate_uri = "$tunnel_info->{proto}:$tunnel_info->{addr}";
    $migrate_uri .= ":$tunnel_info->{port}"
        if defined($tunnel_info->{port});

    if ($self->{storage_migration}) {
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar(@online_local_volumes));
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $source_volid = $source_drive->{file};

            my $bwlimit = $self->{local_volumes}->{$source_volid}->{bwlimit};
            my $bitmap = $target->{bitmap};

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
        }
    }

    $self->log('info', "starting online/live migration on $migrate_uri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration capabilities");
    eval { PVE::QemuServer::set_migration_caps($vmid) };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = $self->get_bwlimit();

    my $migrate_speed = $conf->{migrate_speed} // 0;
    $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s

    if ($bwlimit && $migrate_speed) {
        $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
    } else {
        $migrate_speed ||= $bwlimit;
    }
    $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;
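    # Worked example with hypothetical values: migrate_speed 300 MB/s in qm.conf
    # becomes 307200 KB/s; a bwlimit of 102400 KB/s (100 MB/s) is lower, so it
    # wins and is multiplied by 1024 below, giving 104857600 B/s for QMP.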

    if ($migrate_speed) {
        $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
        $self->log('info', "migration speed limit: " . render_bytes($migrate_speed, 1) . "/s");
    } else {
        # always set migrate speed - QEMU defaults to 128 MiB/s == 1 Gbps, so
        # use 16 GiB/s == 128 Gbps to effectively lift the limit
        $migrate_speed = (16 << 30);
    }
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    # migrate-set-parameters expects limit in ms
    $migrate_downtime *= 1000;
    $self->log('info', "migration downtime limit: $migrate_downtime ms");
    $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);
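    # e.g. (hypothetical) 4096 MiB of guest memory: int(4096 * 1048576 / 10) is
    # 429496729 bytes, which round_powerof2() rounds up to 536870912 (512 MiB)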

    $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;

    }

    my $start = time();

    $self->log('info', "start migrate command to $migrate_uri");
    eval {
        mon_cmd($vmid, "migrate", uri => $migrate_uri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;

    my $last_mem_transferred = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;

        usleep($usleep);

        my $stat = eval { mon_cmd($vmid, "query-migrate") };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1_000_000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        my $status = $stat->{status};
        if (defined($status) && $status =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
            die $merr if $merr;
            die "unable to parse migration status '$status' - aborting\n";
        }
        $merr = undef;
        $err_count = 0;

        my $memstat = $stat->{ram};

        if ($status eq 'completed') {
            my $delay = time() - $start;
            if ($delay > 0) {
                my $total = $memstat->{total} || 0;
                my $avg_speed = render_bytes($total / $delay, 1);
                my $downtime = $stat->{downtime} || 0;
                $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
            }
        }

        if ($status eq 'failed' || $status eq 'cancelled') {
            my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
            $self->log('info', "migration status error: $message");
            die "aborting\n";
        }

        if ($status ne 'active') {
            $self->log('info', "migration status: $status");
            last;
        }

        if ($memstat->{transferred} ne $last_mem_transferred) {
            my $trans = $memstat->{transferred} || 0;
            my $rem = $memstat->{remaining} || 0;
            my $total = $memstat->{total} || 0;
            my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
            my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);

            # reduce sleep if remaining memory is lower than the average transfer speed
            $usleep = 100_000 if $avglstat && $rem < $avglstat;

            # also reduce logging if we poll more frequently
            my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;

            my $total_h = render_bytes($total, 1);
            my $transferred_h = render_bytes($trans, 1);
            my $speed_h = render_bytes($speed, 1);

            my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";

            if ($dirty_rate > $speed) {
                my $dirty_rate_h = render_bytes($dirty_rate, 1);
                $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
            }

            $self->log('info', "migration $status, $progress") if $should_log;

            my $xbzrle = $stat->{"xbzrle-cache"} || {};
            my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
            if ($xbzrlebytes || $xbzrlepages) {
                my $bytes_h = render_bytes($xbzrlebytes, 1);

                my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";

                $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
                    if $xbzrle->{'cache-miss-rate'};

                $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};

                $self->log('info', "xbzrle: $msg") if $should_log;
            }

            if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                $downtimecounter++;
            }
            $lastrem = $rem;

            if ($downtimecounter > 5) {
                $downtimecounter = 0;
                $migrate_downtime *= 2;
                $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
                eval {
                    # migrate-set-parameters does not touch values not
                    # specified, so this only changes downtime-limit
                    mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
                };
                $self->log('info', "migrate-set-parameters error: $@") if $@;
            }
        }

        $last_mem_transferred = $memstat->{transferred};
    }

    if ($self->{storage_migration}) {
        # finish block-job with block-job-cancel, to disconnect the source VM from the
        # NBD server and avoid it trying to re-establish the connection. We are in
        # blockjob ready state, thus this command changes it to blockjob complete
        # (see qapi docs)
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
        if (my $err = $@) {
            die "Failed to complete storage migration: $err\n";
        }
    }
}

sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $vm_status = eval {
        mon_cmd($vmid, 'query-status')->{status} or die "no 'status' in result\n";
    };
    $self->log('err', "query-status error: $@") if $@;

    # Can end up in POSTMIGRATE state if failure occurred after convergence. Try going back to
    # original state. Unfortunately, direct transition from POSTMIGRATE to PAUSED is not possible.
    if ($vm_status && $vm_status eq 'postmigrate') {
        if (!$self->{vm_was_paused}) {
            eval { mon_cmd($vmid, 'cont'); };
            $self->log('err', "resuming VM failed: $@") if $@;
        } else {
            $self->log('err', "VM was paused, but ended in postmigrate state");
        }
    }

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    my $nodename = PVE::INotify::nodename();

    if ($self->{tunnel} && $self->{tunnel}->{version} >= 2) {
        PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'stop');
    } else {
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # cleanup after stopping, otherwise disks might be in-use by target VM!
    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{tunnel}) {
        eval { PVE::Tunnel::finish_tunnel($self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub phase3 {
    my ($self, $vmid) = @_;

    return;
}

sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{volume_map} && !$self->{opts}->{remote}) {
        my $target_drives = $self->{target_drive};

        # FIXME: for NBD storage migration we now only update the volid, and
        # not the full drivestr from the target node. Work around that until we
        # get some real rescan, to avoid things like wrong format in the drive
        delete $conf->{$_} for keys %$target_drives;
        PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});

        for my $drive (keys %$target_drives) {
            $conf->{$drive} = $target_drives->{$drive}->{drivestr};
        }
        PVE::QemuConfig->write_config($vmid, $conf);
    }

    # transfer replication state before moving the config
    if (!$self->{opts}->{remote}) {
        $self->transfer_replication_state() if $self->{is_replicated};
        PVE::QemuConfig->move_config_to_node($vmid, $self->{node});
        $self->switch_replication_job_target() if $self->{is_replicated};
    }

    if ($self->{livemigration}) {
        if ($self->{stopnbd}) {
            $self->log('info', "stopping NBD storage migration server on target.");
            # stop nbd server on remote vm - requirement for resume since 2.9
            if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 2) {
                PVE::Tunnel::write_tunnel($tunnel, 30, 'nbdstop');
            } else {
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            }
        }

        # delete local FDB entries if learning is disabled, they'll be re-added on target on resume
        PVE::QemuServer::del_nets_bridge_fdb($conf, $vmid);

        if (!$self->{vm_was_paused}) {
            # config moved and nbd server stopped - now we can resume vm on target
            if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
                my $cmd = $tunnel->{version} == 1 ? "resume $vmid" : "resume";
                eval {
                    PVE::Tunnel::write_tunnel($tunnel, 30, $cmd);
                };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            } else {
                # nocheck in case target node hasn't processed the config move/rename yet
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
                my $logf = sub {
                    my $line = shift;
                    $self->log('err', $line);
                };
                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            }
        }

        if (
            $self->{storage_migration}
            && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}
            && $self->{running}
        ) {
            if (!$self->{vm_was_paused}) {
                $self->log('info', "issuing guest fstrim");
                if ($self->{opts}->{remote}) {
                    PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'fstrim');
                } else {
                    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
                    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
                    if (my $err = $@) {
                        $self->log('err', "fstrim failed - $err");
                        $self->{errors} = 1;
                    }
                }
            } else {
                $self->log('info', "skipping guest fstrim, because VM is paused");
            }
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel && $tunnel->{version} == 1) {
        eval { PVE::Tunnel::finish_tunnel($tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
        $tunnel = undef;
        delete $self->{tunnel};
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM with nocheck, since config is moved already
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);

    # destroy local copies
    foreach my $volid (@not_replicated_volumes) {
        # remote is cleaned up below
        next if $self->{opts}->{remote};

        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }

    # clear migrate lock
    if ($tunnel && $tunnel->{version} >= 2) {
        PVE::Tunnel::write_tunnel($tunnel, 10, "unlock");

        PVE::Tunnel::finish_tunnel($tunnel);
    } else {
        my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
        $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
    }

    if ($self->{opts}->{remote} && $self->{opts}->{delete}) {
        eval { PVE::QemuServer::destroy_vm($self->{storecfg}, $vmid, 1, undef, 0) };
        warn "Failed to remove source VM - $@\n" if $@;
    }
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0] - 1) / log(2));
}
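# Rounds up to the next power of two (exact powers stay unchanged), e.g.:
#   round_powerof2(1)    == 1
#   round_powerof2(3)    == 4
#   round_powerof2(1000) == 1024
#   round_powerof2(1024) == 1024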

1;