3ea94c60 1package PVE::QemuMigrate;
1ef75254 2
1e3baf05 3use strict;
3ea94c60 4use warnings;
6d7450cb 5
3ea94c60 6use IO::File;
1e3baf05 7use IPC::Open2;
6d7450cb
TL
8use Time::HiRes qw( usleep );
9
46e0b1eb 10use PVE::AccessControl;
3ea94c60 11use PVE::Cluster;
eef93bc5 12use PVE::Format qw(render_bytes);
4b26ffbf 13use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
6d7450cb 14use PVE::INotify;
46e0b1eb 15use PVE::JSONSchema;
6d7450cb
TL
16use PVE::RPCEnvironment;
17use PVE::Replication;
18use PVE::ReplicationConfig;
19use PVE::ReplicationState;
46e0b1eb 20use PVE::Storage::Plugin;
1e3baf05 21use PVE::Storage;
eef93bc5 22use PVE::StorageTunnel;
6d7450cb 23use PVE::Tools;
e594231b 24use PVE::Tunnel;
6d7450cb 25
912792e2 26use PVE::QemuConfig;
58c64ad5 27use PVE::QemuServer::CPUConfig;
e0fd2b2f 28use PVE::QemuServer::Drive;
28e6e180 29use PVE::QemuServer::Helpers qw(min_version);
3392d6ca 30use PVE::QemuServer::Machine;
0a13e08e 31use PVE::QemuServer::Monitor qw(mon_cmd);
7f8c8087 32use PVE::QemuServer::Memory qw(get_current_memory);
6d7450cb 33use PVE::QemuServer;
1e3baf05 34
6d7450cb 35use PVE::AbstractMigrate;
16e903f2 36use base qw(PVE::AbstractMigrate);
1e3baf05 37
eef93bc5
FG
38# compared against remote end's minimum version
39our $WS_TUNNEL_VERSION = 2;
40
1e3baf05 41sub fork_tunnel {
ae194a5c 42 my ($self, $ssh_forward_info) = @_;
1e3baf05 43
e594231b
FG
44 my $cmd = ['/usr/sbin/qm', 'mtunnel'];
45 my $log = sub {
46 my ($level, $msg) = @_;
47 $self->log($level, $msg);
1e3baf05 48 };
1c9d54bf 49
e594231b 50 return PVE::Tunnel::fork_ssh_tunnel($self->{rem_ssh}, $cmd, $ssh_forward_info, $log);
1e3baf05
DM
51}
52
eef93bc5
FG
53sub fork_websocket_tunnel {
54 my ($self, $storages, $bridges) = @_;
55
56 my $remote = $self->{opts}->{remote};
57 my $conn = $remote->{conn};
58
59 my $log = sub {
60 my ($level, $msg) = @_;
61 $self->log($level, $msg);
62 };
63
64 my $websocket_url = "https://$conn->{host}:$conn->{port}/api2/json/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnelwebsocket";
65 my $url = "/nodes/$self->{node}/qemu/$remote->{vmid}/mtunnel";
66
67 my $tunnel_params = {
68 url => $websocket_url,
69 };
70
71 my $storage_list = join(',', keys %$storages);
72 my $bridge_list = join(',', keys %$bridges);
73
74 my $req_params = {
75 storages => $storage_list,
76 bridges => $bridge_list,
77 };
78
79 return PVE::Tunnel::fork_websocket_tunnel($conn, $url, $req_params, $tunnel_params, $log);
80}
81
05b2a4ae
FG
82# tunnel_info:
83# proto: unix (secure) or tcp (insecure/legacy compat)
84# addr: IP or UNIX socket path
85# port: optional TCP port
86# unix_sockets: additional UNIX socket paths to forward
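# A minimal sketch (illustrative values only, not taken from a real run) of such
# a tunnel_info hash for a secure migration with one NBD disk export:
#   {
#       proto => 'unix',
#       addr => '/run/qemu-server/100.migrate',
#       unix_sockets => { '/run/qemu-server/100_nbd.migrate' => 1 },
#   }
# an insecure/legacy migration would instead use proto 'tcp' with addr and port set.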
7d730f95 87sub start_remote_tunnel {
05b2a4ae 88 my ($self, $tunnel_info) = @_;
7d730f95
FE
89
90 my $nodename = PVE::INotify::nodename();
91 my $migration_type = $self->{opts}->{migration_type};
92
93 if ($migration_type eq 'secure') {
94
05b2a4ae
FG
95 if ($tunnel_info->{proto} eq 'unix') {
96 my $ssh_forward_info = [];
7d730f95 97
05b2a4ae
FG
98 my $unix_sockets = [ keys %{$tunnel_info->{unix_sockets}} ];
99 push @$unix_sockets, $tunnel_info->{addr};
7d730f95
FE
100 for my $sock (@$unix_sockets) {
101 push @$ssh_forward_info, "$sock:$sock";
102 unlink $sock;
103 }
104
105 $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);
106
107 my $unix_socket_try = 0; # wait for the socket to become ready
108 while ($unix_socket_try <= 100) {
109 $unix_socket_try++;
110 my $available = 0;
111 foreach my $sock (@$unix_sockets) {
112 if (-S $sock) {
113 $available++;
114 }
115 }
116
117 if ($available == @$unix_sockets) {
118 last;
119 }
120
121 usleep(50000);
122 }
123 if ($unix_socket_try > 100) {
124 $self->{errors} = 1;
e594231b 125 PVE::Tunnel::finish_tunnel($self->{tunnel});
05b2a4ae 126 die "Timeout, migration socket $tunnel_info->{addr} did not get ready";
7d730f95
FE
127 }
128 $self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);
129
05b2a4ae 130 } elsif ($tunnel_info->{proto} eq 'tcp') {
7d730f95 131 my $ssh_forward_info = [];
05b2a4ae 132 if ($tunnel_info->{addr} eq "localhost") {
7d730f95
FE
133 # for backwards compatibility with older qemu-server versions
134 my $pfamily = PVE::Tools::get_host_address_family($nodename);
135 my $lport = PVE::Tools::next_migrate_port($pfamily);
05b2a4ae 136 push @$ssh_forward_info, "$lport:localhost:$tunnel_info->{port}";
7d730f95
FE
137 }
138
139 $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);
140
141 } else {
05b2a4ae 142 die "unsupported protocol in migration URI: $tunnel_info->{proto}\n";
7d730f95
FE
143 }
144 } else {
145 #fork tunnel for insecure migration, to send faster commands like resume
146 $self->{tunnel} = $self->fork_tunnel();
147 }
148}
149
16e903f2
DM
150sub lock_vm {
151 my ($self, $vmid, $code, @param) = @_;
f5eb281a 152
ffda963f 153 return PVE::QemuConfig->lock_config($vmid, $code, @param);
16e903f2 154}
ff1a2432 155
e3aad441
AL
156sub target_storage_check_available {
157 my ($self, $storecfg, $targetsid, $volid) = @_;
158
159 if (!$self->{opts}->{remote}) {
160 # check if storage is available on target node
161 my $target_scfg = PVE::Storage::storage_check_enabled(
162 $storecfg,
163 $targetsid,
164 $self->{node},
165 );
166 my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
167 die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
168 if !$target_scfg->{content}->{$vtype};
169 }
170}
171
16e903f2
DM
172sub prepare {
173 my ($self, $vmid) = @_;
ff1a2432 174
16e903f2 175 my $online = $self->{opts}->{online};
3ea94c60 176
8a5bd889 177 my $storecfg = $self->{storecfg} = PVE::Storage::config();
3ea94c60 178
e1fc368d 179 # test if VM exists
ffda963f 180 my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);
3ea94c60 181
9c88e854
AD
182 my $version = PVE::QemuServer::Helpers::get_node_pvecfg_version($self->{node});
183 my $cloudinit_config = $conf->{cloudinit};
184
71cc2c41
TL
185 if (
186 PVE::QemuConfig->has_cloudinit($conf) && defined($cloudinit_config)
187 && scalar(keys %$cloudinit_config) > 0
188 && !PVE::QemuServer::Helpers::pvecfg_min_version($version, 7, 2, 13)
189 ) {
190 die "target node is too old (manager <= 7.2-13) and doesn't support new cloudinit section\n";
9c88e854
AD
191 }
192
c2c96d73
FE
193 my $repl_conf = PVE::ReplicationConfig->new();
194 $self->{replication_jobcfg} = $repl_conf->find_local_replication_job($vmid, $self->{node});
195 $self->{is_replicated} = $repl_conf->check_for_existing_jobs($vmid, 1);
196
19ff3682
FE
197 if ($self->{replication_jobcfg} && defined($self->{replication_jobcfg}->{remove_job})) {
198 die "refusing to migrate replicated VM whose replication job is marked for removal\n";
199 }
200
ffda963f 201 PVE::QemuConfig->check_lock($conf);
3ea94c60 202
16e903f2
DM
203 my $running = 0;
204 if (my $pid = PVE::QemuServer::check_running($vmid)) {
b6adff33 205 die "can't migrate running VM without --online\n" if !$online;
16e903f2 206 $running = $pid;
42dbd2ee 207
c2c96d73 208 if ($self->{is_replicated} && !$self->{replication_jobcfg}) {
68980d66
FE
209 if ($self->{opts}->{force}) {
210 $self->log('warn', "WARNING: Node '$self->{node}' is not a replication target. Existing " .
211 "replication jobs will fail after migration!\n");
212 } else {
213 die "Cannot live-migrate replicated VM to node '$self->{node}' - not a replication " .
214 "target. Use 'force' to override.\n";
215 }
216 }
217
3392d6ca 218 $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
7bac824e 219
58c64ad5
SR
220 # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
221 # Since the parameter itself contains no reference to a custom model,
222 # this makes migration independent of changes to "cpu-models.conf".
223 if ($conf->{cpu}) {
b53ba8d0 224 my $cpuconf = PVE::JSONSchema::parse_property_string('pve-cpu-conf', $conf->{cpu});
58c64ad5
SR
225 if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
226 $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
227 }
228 }
a183576e 229
6f0627d4
FS
230 # Do not treat a suspended VM as paused, as it might wake up
231 # during migration and remain paused after migration finishes.
232 $self->{vm_was_paused} = 1 if PVE::QemuServer::vm_is_paused($vmid, 0);
3ea94c60 233 }
58c64ad5 234
92cd9b18
DC
235 my ($loc_res, $mapped_res, $missing_mappings_by_node) = PVE::QemuServer::check_local_resources($conf, 1);
236 my $blocking_resources = [];
237 for my $res ($loc_res->@*) {
 238 if (!grep { $_ eq $res } $mapped_res->@*) {
239 push $blocking_resources->@*, $res;
240 }
241 }
242 if (scalar($blocking_resources->@*)) {
16e903f2 243 if ($self->{running} || !$self->{opts}->{force}) {
92cd9b18 244 die "can't migrate VM which uses local devices: " . join(", ", $blocking_resources->@*) . "\n";
16e903f2
DM
245 } else {
246 $self->log('info', "migrating VM which uses local devices");
247 }
3ea94c60
DM
248 }
249
92cd9b18
DC
250 if (scalar($mapped_res->@*)) {
251 my $missing_mappings = $missing_mappings_by_node->{$self->{node}};
252 if ($running) {
253 die "can't migrate running VM which uses mapped devices: " . join(", ", $mapped_res->@*) . "\n";
254 } elsif (scalar($missing_mappings->@*)) {
255 die "can't migrate to '$self->{node}': missing mapped devices " . join(", ", $missing_mappings->@*) . "\n";
256 } else {
257 $self->log('info', "migrating VM which uses mapped local devices");
258 }
259 }
260
bb30eedf
MF
261 my $vga = PVE::QemuServer::parse_vga($conf->{vga});
262 if ($running && $vga->{'clipboard'} && $vga->{'clipboard'} eq 'vnc') {
263 die "VMs with 'clipboard' set to 'vnc' are not live migratable!\n";
264 }
265
ff1a2432 266 my $vollist = PVE::QemuServer::get_vm_volumes($conf);
eef93bc5
FG
267
268 my $storages = {};
29701766
FG
269 foreach my $volid (@$vollist) {
270 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
271
eef93bc5 272 # check if storage is available on source node
8a5bd889 273 my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);
d213ba29 274
95b3583b 275 my $targetsid = $sid;
eef93bc5
FG
276 # NOTE: local ignores shared mappings, remote maps them
277 if (!$scfg->{shared} || $self->{opts}->{remote}) {
82a03671 278 $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
d213ba29
FE
279 }
280
eef93bc5 281 $storages->{$targetsid} = 1;
24b84b47 282
e3aad441 283 $self->target_storage_check_available($storecfg, $targetsid, $volid);
73f5ee92
FG
284
285 if ($scfg->{shared}) {
286 # PVE::Storage::activate_storage checks this for non-shared storages
287 my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
288 warn "Used shared storage '$sid' is not online on source node!\n"
289 if !$plugin->check_connection($sid, $scfg);
73f5ee92 290 }
29701766 291 }
3ea94c60 292
eef93bc5
FG
293 if ($self->{opts}->{remote}) {
294 # test & establish websocket connection
295 my $bridges = map_bridges($conf, $self->{opts}->{bridgemap}, 1);
296 my $tunnel = $self->fork_websocket_tunnel($storages, $bridges);
297 my $min_version = $tunnel->{version} - $tunnel->{age};
298 $self->log('info', "local WS tunnel version: $WS_TUNNEL_VERSION");
299 $self->log('info', "remote WS tunnel version: $tunnel->{version}");
300 $self->log('info', "minimum required WS tunnel version: $min_version");
301 die "Remote tunnel endpoint not compatible, upgrade required\n"
302 if $WS_TUNNEL_VERSION < $min_version;
303 die "Remote tunnel endpoint too old, upgrade required\n"
304 if $WS_TUNNEL_VERSION > $tunnel->{version};
305
306 print "websocket tunnel started\n";
307 $self->{tunnel} = $tunnel;
308 } else {
309 # test ssh connection
310 my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
311 eval { $self->cmd_quiet($cmd); };
312 die "Can't connect to destination address using public key\n" if $@;
313 }
ff1a2432 314
16e903f2 315 return $running;
3ea94c60
DM
316}
317
d10b78f4 318sub scan_local_volumes {
16e903f2
DM
319 my ($self, $vmid) = @_;
320
16e903f2
DM
321 my $conf = $self->{vmconf};
322
dabf2473 323 # local volumes which have been copied
37666e4c 324 # and their old_id => new_id pairs
37666e4c 325 $self->{volume_map} = {};
d10b78f4 326 $self->{local_volumes} = {};
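 # illustrative shapes of these two hashes (volume IDs made up for the example):
 #   volume_map:    { 'local-lvm:vm-100-disk-0' => 'tank:vm-100-disk-0' }
 #   local_volumes: { 'local-lvm:vm-100-disk-0' => { ref => 'attached', migration_mode => 'online', ... } }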
3ea94c60 327
b10afa31 328 my $storecfg = $self->{storecfg};
3ea94c60 329 eval {
dabf2473 330 # found local volumes and their origin
d10b78f4 331 my $local_volumes = $self->{local_volumes};
5bf7f0f1
FG
332 my $local_volumes_errors = {};
333 my $other_errors = [];
334 my $abort = 0;
07f2e57c 335 my $path_to_volid = {};
3ea94c60 336
5bf7f0f1
FG
337 my $log_error = sub {
338 my ($msg, $volid) = @_;
339
340 if (defined($volid)) {
341 $local_volumes_errors->{$volid} = $msg;
342 } else {
343 push @$other_errors, $msg;
344 }
345 $abort = 1;
346 };
347
c2c96d73 348 my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
2cd808d3 349 : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
4b26ffbf
FE
350 foreach my $volid (keys %{$replicatable_volumes}) {
351 $local_volumes->{$volid}->{replicated} = 1;
352 }
b9f44d27 353
3629c19d 354 my $test_volid = sub {
aee6abe5 355 my ($volid, $attr) = @_;
3ea94c60 356
5bf7f0f1 357 if ($volid =~ m|^/|) {
ec82e3ee 358 return if $attr->{shared};
6f58fce9 359 $local_volumes->{$volid}->{ref} = 'config';
5bf7f0f1
FG
360 die "local file/device\n";
361 }
3ea94c60 362
aee6abe5
DM
363 my $snaprefs = $attr->{referenced_in_snapshot};
364
365 if ($attr->{cdrom}) {
5bf7f0f1
FG
366 if ($volid eq 'cdrom') {
367 my $msg = "can't migrate local cdrom drive";
6e9c4929 368 if (defined($snaprefs) && !$attr->{is_attached}) {
aee6abe5 369 my $snapnames = join(', ', sort keys %$snaprefs);
5009a8c7 370 $msg .= " (referenced in snapshot - $snapnames)";
aee6abe5 371 }
5bf7f0f1
FG
372 &$log_error("$msg\n");
373 return;
374 }
3ea94c60 375 return if $volid eq 'none';
3ea94c60
DM
376 }
377
378 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
379
16e903f2 380 # check if storage is available on both nodes
0d2db084 381 my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);
d213ba29 382
95b3583b 383 my $targetsid = $sid;
eef93bc5
FG
384 # NOTE: local ignores shared mappings, remote maps them
385 if (!$scfg->{shared} || $self->{opts}->{remote}) {
82a03671 386 $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
d213ba29
FE
387 }
388
e3aad441
AL
389 $self->target_storage_check_available($storecfg, $targetsid, $volid);
390 return if $scfg->{shared} && !$self->{opts}->{remote};
3ea94c60 391
6e9c4929
AL
392 $local_volumes->{$volid}->{ref} = 'pending' if $attr->{referenced_in_pending};
393 $local_volumes->{$volid}->{ref} = 'snapshot' if $attr->{referenced_in_snapshot};
394 $local_volumes->{$volid}->{ref} = 'unused' if $attr->{is_unused};
395 $local_volumes->{$volid}->{ref} = 'attached' if $attr->{is_attached};
f9dde219 396 $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};
d62fcf74 397
a0dbed5a
AL
398 $local_volumes->{$volid}->{bwlimit} = $self->get_bwlimit($sid, $targetsid);
399 $local_volumes->{$volid}->{targetsid} = $targetsid;
400
401 $local_volumes->{$volid}->@{qw(size format)} = PVE::Storage::volume_size_info($storecfg, $volid);
402
cc1a3820
FE
403 $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;
404
a6be63ac
FE
405 $local_volumes->{$volid}->{drivename} = $attr->{drivename}
406 if $attr->{drivename};
407
e9f12f96
FE
408 # If with_snapshots is not set for storage migrate, it tries to use
 409 # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size and
 410 # back to qcow2 is currently not possible.
411 $local_volumes->{$volid}->{snapshots} = ($local_volumes->{$volid}->{format} =~ /^(?:qcow2|vmdk)$/);
412
9e93a63f
ML
413 if ($attr->{cdrom}) {
414 if ($volid =~ /vm-\d+-cloudinit/) {
415 $local_volumes->{$volid}->{ref} = 'generated';
416 return;
417 }
418 die "local cdrom image\n";
419 }
3629c19d 420
b10afa31 421 my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
3ea94c60 422
5bf7f0f1 423 die "owned by other VM (owner = VM $owner)\n"
b10afa31 424 if !$owner || ($owner != $vmid);
3ea94c60 425
07f2e57c
AL
426 $path_to_volid->{$path}->{$volid} = 1;
427
b24f07d4
FE
428 return if $attr->{is_vmstate};
429
aee6abe5 430 if (defined($snaprefs)) {
5eca0c36
FE
431 $local_volumes->{$volid}->{snapshots} = 1;
432
3629c19d
DM
 433 # we cannot migrate snapshots on local storage
434 # exceptions: 'zfspool' or 'qcow2' files (on directory storage)
435
5cbf4d72
FE
436 die "online storage migration not possible if non-replicated snapshot exists\n"
437 if $self->{running} && !$local_volumes->{$volid}->{replicated};
438
eef93bc5
FG
439 die "remote migration with snapshots not supported yet\n" if $self->{opts}->{remote};
440
205dbf39
WB
441 if (!($scfg->{type} eq 'zfspool'
442 || ($scfg->{type} eq 'btrfs' && $local_volumes->{$volid}->{format} eq 'raw')
443 || $local_volumes->{$volid}->{format} eq 'qcow2'
444 )) {
5bf7f0f1 445 die "non-migratable snapshot exists\n";
3629c19d 446 }
3629c19d 447 }
3a7bc9e2
FG
448
449 die "referenced by linked clone(s)\n"
b10afa31 450 if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
3629c19d
DM
451 };
452
0b7a0b78 453 PVE::QemuServer::foreach_volid($conf, sub {
aee6abe5
DM
454 my ($volid, $attr) = @_;
455 eval { $test_volid->($volid, $attr); };
456 if (my $err = $@) {
457 &$log_error($err, $volid);
458 }
459 });
3ea94c60 460
07f2e57c
AL
461 for my $path (keys %$path_to_volid) {
462 my @volids = keys $path_to_volid->{$path}->%*;
1e540583 463 die "detected not supported aliased volumes: '" . join("', '", @volids) . "'\n"
07f2e57c
AL
464 if (scalar(@volids) > 1);
465 }
466
dabf2473 467 foreach my $vol (sort keys %$local_volumes) {
b9f44d27 468 my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
6f58fce9 469 my $ref = $local_volumes->{$vol}->{ref};
6e9c4929 470 if ($ref eq 'attached') {
dbc9420b
DM
471 &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
472 if $self->{running} && !$self->{opts}->{"with-local-disks"};
6e9c4929
AL
473 $self->log('info', "found $type disk '$vol' (attached)\n");
474 } elsif ($ref eq 'unused') {
475 $self->log('info', "found $type disk '$vol' (unused)\n");
6f58fce9 476 } elsif ($ref eq 'snapshot') {
b9f44d27 477 $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
6e9c4929
AL
478 } elsif ($ref eq 'pending') {
479 $self->log('info', "found $type disk '$vol' (pending change)\n");
9e93a63f
ML
480 } elsif ($ref eq 'generated') {
481 $self->log('info', "found generated disk '$vol' (in current VM config)\n");
d62fcf74 482 } else {
b9f44d27 483 $self->log('info', "found $type disk '$vol'\n");
d62fcf74
FG
484 }
485 }
486
5bf7f0f1
FG
487 foreach my $vol (sort keys %$local_volumes_errors) {
488 $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
489 }
490 foreach my $err (@$other_errors) {
491 $self->log('warn', "$err");
492 }
493
5bf7f0f1
FG
494 if ($abort) {
495 die "can't migrate VM - check log\n";
496 }
497
c4d2d6c1 498 # additional checks for local storage
dabf2473 499 foreach my $volid (keys %$local_volumes) {
3ea94c60 500 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
b10afa31 501 my $scfg = PVE::Storage::storage_config($storecfg, $sid);
3ea94c60 502
205dbf39 503 my $migratable = $scfg->{type} =~ /^(?:dir|btrfs|zfspool|lvmthin|lvm)$/;
c4d2d6c1 504
eef93bc5
FG
505 # TODO: what is this even here for?
506 $migratable = 1 if $self->{opts}->{remote};
507
37a6dc78 508 die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
c4d2d6c1 509 if !$migratable;
d5604092 510
c4d2d6c1 511 # image is a linked clone on local storage, so we can't migrate.
b10afa31 512 if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
c4d2d6c1 513 die "can't migrate '$volid' as it's a clone of '$basename'";
d5604092 514 }
3ea94c60
DM
515 }
516
eb3acec8 517 foreach my $volid (sort keys %$local_volumes) {
9e93a63f 518 my $ref = $local_volumes->{$volid}->{ref};
6e9c4929 519 if ($self->{running} && $ref eq 'attached') {
efe0d457 520 $local_volumes->{$volid}->{migration_mode} = 'online';
ad8b9d5e 521 } elsif ($self->{running} && $ref eq 'generated') {
104f47a9 522 # offline migrate the cloud-init ISO and don't regenerate on VM start
f9dde219
SR
523 #
524 # tpmstate will also be offline migrated first, and in case of
525 # live migration then updated by QEMU/swtpm if necessary
104f47a9 526 $local_volumes->{$volid}->{migration_mode} = 'offline';
b74cad8a 527 } else {
d10b78f4 528 $local_volumes->{$volid}->{migration_mode} = 'offline';
b74cad8a 529 }
3ea94c60
DM
530 }
531 };
d10b78f4
FE
532 die "Problem found while scanning volumes - $@" if $@;
533}
534
a6be63ac
FE
535sub handle_replication {
536 my ($self, $vmid) = @_;
537
538 my $conf = $self->{vmconf};
539 my $local_volumes = $self->{local_volumes};
540
541 return if !$self->{replication_jobcfg};
eef93bc5
FG
542
543 die "can't migrate VM with replicated volumes to remote cluster/node\n"
544 if $self->{opts}->{remote};
545
a6be63ac
FE
546 if ($self->{running}) {
547
548 my $version = PVE::QemuServer::kvm_user_version();
549 if (!min_version($version, 4, 2)) {
550 die "can't live migrate VM with replicated volumes, pve-qemu to old (< 4.2)!\n"
551 }
552
553 my @live_replicatable_volumes = $self->filter_local_volumes('online', 1);
554 foreach my $volid (@live_replicatable_volumes) {
555 my $drive = $local_volumes->{$volid}->{drivename};
556 die "internal error - no drive for '$volid'\n" if !defined($drive);
557
558 my $bitmap = "repl_$drive";
559
560 # start tracking before replication to get full delta + a few duplicates
561 $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
562 mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);
563
564 # other info comes from target node in phase 2
565 $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
566 }
567 }
568 $self->log('info', "replicating disk images");
569
570 my $start_time = time();
571 my $logfunc = sub { $self->log('info', shift) };
572 my $actual_replicated_volumes = PVE::Replication::run_replication(
573 'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);
574
575 # extra safety check
576 my @replicated_volumes = $self->filter_local_volumes(undef, 1);
577 foreach my $volid (@replicated_volumes) {
578 die "expected volume '$volid' to get replicated, but it wasn't\n"
579 if !$actual_replicated_volumes->{$volid};
580 }
581}
582
3276a434
FE
583sub config_update_local_disksizes {
584 my ($self) = @_;
585
586 my $conf = $self->{vmconf};
587 my $local_volumes = $self->{local_volumes};
588
589 PVE::QemuConfig->foreach_volume($conf, sub {
590 my ($key, $drive) = @_;
f9dde219
SR
591 # skip special disks, will be handled later
592 return if $key eq 'efidisk0';
593 return if $key eq 'tpmstate0';
3276a434
FE
594
595 my $volid = $drive->{file};
596 return if !defined($local_volumes->{$volid}); # only update sizes for local volumes
597
598 my ($updated, $msg) = PVE::QemuServer::Drive::update_disksize($drive, $local_volumes->{$volid}->{size});
599 if (defined($updated)) {
600 $conf->{$key} = PVE::QemuServer::print_drive($updated);
601 $self->log('info', "drive '$key': $msg");
602 }
603 });
604
605 # we want to set the efidisk size in the config to the size of the
 606 # real OVMF_VARS.fd image, otherwise we might create an image that is too big, which does not work
607 if (defined($conf->{efidisk0})) {
608 PVE::QemuServer::update_efidisk_size($conf);
609 }
f9dde219
SR
610
 611 # TPM state might have an irregular filesize; to avoid problems on transfer,
 612 # we always assume a static size of 4M to allocate on the target
613 if (defined($conf->{tpmstate0})) {
614 PVE::QemuServer::update_tpmstate_size($conf);
615 }
3276a434
FE
616}
617
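# filter_local_volumes: return the sorted volids from $self->{local_volumes},
# optionally filtered by migration mode and/or replication status. For example
# (hypothetical call), $self->filter_local_volumes('offline', 0) yields only the
# non-replicated volumes that are to be migrated offline; passing undef for a
# parameter skips that filter.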
d10b78f4 618sub filter_local_volumes {
4b26ffbf 619 my ($self, $migration_mode, $replicated) = @_;
d10b78f4
FE
620
621 my $volumes = $self->{local_volumes};
622 my @filtered_volids;
623
624 foreach my $volid (sort keys %{$volumes}) {
625 next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
4b26ffbf 626 next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
d10b78f4
FE
627 push @filtered_volids, $volid;
628 }
629
630 return @filtered_volids;
631}
632
633sub sync_offline_local_volumes {
634 my ($self) = @_;
635
636 my $local_volumes = $self->{local_volumes};
4b26ffbf 637 my @volids = $self->filter_local_volumes('offline', 0);
d10b78f4
FE
638
639 my $storecfg = $self->{storecfg};
640 my $opts = $self->{opts};
641
642 $self->log('info', "copying local disk images") if scalar(@volids);
643
644 foreach my $volid (@volids) {
eef93bc5 645 my $new_volid;
d10b78f4 646
eef93bc5
FG
647 my $opts = $self->{opts};
648 if ($opts->{remote}) {
649 my $log = sub {
650 my ($level, $msg) = @_;
651 $self->log($level, $msg);
652 };
653
654 $new_volid = PVE::StorageTunnel::storage_migrate(
655 $self->{tunnel},
656 $storecfg,
657 $volid,
658 $self->{vmid},
659 $opts->{remote}->{vmid},
660 $local_volumes->{$volid},
661 $log,
662 );
663 } else {
664 my $targetsid = $local_volumes->{$volid}->{targetsid};
665
666 my $bwlimit = $local_volumes->{$volid}->{bwlimit};
667 $bwlimit = $bwlimit * 1024 if defined($bwlimit); # storage_migrate uses bps
668
669 my $storage_migrate_opts = {
670 'ratelimit_bps' => $bwlimit,
671 'insecure' => $opts->{migration_type} eq 'insecure',
672 'with_snapshots' => $local_volumes->{$volid}->{snapshots},
673 'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
674 };
675
676 my $logfunc = sub { $self->log('info', $_[0]); };
677 $new_volid = eval {
678 PVE::Storage::storage_migrate(
679 $storecfg,
680 $volid,
681 $self->{ssh_info},
682 $targetsid,
683 $storage_migrate_opts,
684 $logfunc,
685 );
686 };
687 if (my $err = $@) {
688 die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
689 }
d10b78f4
FE
690 }
691
692 $self->{volume_map}->{$volid} = $new_volid;
693 $self->log('info', "volume '$volid' is '$new_volid' on the target\n");
694
695 eval { PVE::Storage::deactivate_volumes($storecfg, [$volid]); };
696 if (my $err = $@) {
697 $self->log('warn', $err);
698 }
699 }
3ea94c60
DM
700}
701
b74cad8a
AD
702sub cleanup_remotedisks {
703 my ($self) = @_;
704
eef93bc5
FG
705 if ($self->{opts}->{remote}) {
706 PVE::Tunnel::finish_tunnel($self->{tunnel}, 1);
707 delete $self->{tunnel};
708 return;
709 }
710
4b26ffbf
FE
711 my $local_volumes = $self->{local_volumes};
712
eb5751ba 713 foreach my $volid (values %{$self->{volume_map}}) {
9b6efe43 714 # don't clean up replicated disks!
4b26ffbf 715 next if $local_volumes->{$volid}->{replicated};
b74cad8a 716
eb5751ba 717 my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
b74cad8a
AD
718
719 my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
720
721 eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
722 if (my $err = $@) {
723 $self->log('err', $err);
724 $self->{errors} = 1;
725 }
726 }
727}
728
9b6efe43
FG
729sub cleanup_bitmaps {
730 my ($self) = @_;
7f5fb49a 731 foreach my $drive (keys %{$self->{target_drive}}) {
9b6efe43
FG
732 my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
733 next if !$bitmap;
734 $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
735 mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
736 }
737}
738
1e3baf05 739sub phase1 {
16e903f2 740 my ($self, $vmid) = @_;
1e3baf05 741
16e903f2 742 $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");
1e3baf05 743
16e903f2 744 my $conf = $self->{vmconf};
1e3baf05
DM
745
746 # set migrate lock in config file
1858638f 747 $conf->{lock} = 'migrate';
ffda963f 748 PVE::QemuConfig->write_config($vmid, $conf);
1e3baf05 749
d10b78f4 750 $self->scan_local_volumes($vmid);
3276a434
FE
751
752 # fix disk sizes to match their actual size and write changes,
753 # so that the target allocates the correct volumes
754 $self->config_update_local_disksizes();
68b108ee 755 PVE::QemuConfig->write_config($vmid, $conf);
d10b78f4 756
a6be63ac
FE
757 $self->handle_replication($vmid);
758
d10b78f4 759 $self->sync_offline_local_volumes();
eef93bc5 760 $self->phase1_remote($vmid) if $self->{opts}->{remote};
1e3baf05
DM
761};
762
eef93bc5
FG
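# map_bridges: translate the bridge of every netX device according to the given
# map. A rough sketch of the in/out shapes (names made up for illustration):
# with $map mapping 'vmbr0' => 'vmbr1' and net0 using bridge=vmbr0, the result is
#   { 'vmbr1' => { 'net0' => 'vmbr0' } }
# and, unless $scan_only is set, net0 in $conf is rewritten to use vmbr1.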
763sub map_bridges {
764 my ($conf, $map, $scan_only) = @_;
765
766 my $bridges = {};
767
768 foreach my $opt (keys %$conf) {
769 next if $opt !~ m/^net\d+$/;
770
771 next if !$conf->{$opt};
772 my $d = PVE::QemuServer::parse_net($conf->{$opt});
773 next if !$d || !$d->{bridge};
774
775 my $target_bridge = PVE::JSONSchema::map_id($map, $d->{bridge});
776 $bridges->{$target_bridge}->{$opt} = $d->{bridge};
777
778 next if $scan_only;
779
780 $d->{bridge} = $target_bridge;
781 $conf->{$opt} = PVE::QemuServer::print_net($d);
782 }
783
784 return $bridges;
785}
786
787sub phase1_remote {
788 my ($self, $vmid) = @_;
789
790 my $remote_conf = PVE::QemuConfig->load_config($vmid);
791 PVE::QemuConfig->update_volume_ids($remote_conf, $self->{volume_map});
792
793 my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
794 for my $target (keys $bridges->%*) {
795 for my $nic (keys $bridges->{$target}->%*) {
796 $self->log('info', "mapped: $nic from $bridges->{$target}->{$nic} to $target");
797 }
798 }
799
800 my @online_local_volumes = $self->filter_local_volumes('online');
801
802 my $storage_map = $self->{opts}->{storagemap};
803 $self->{nbd} = {};
804 PVE::QemuConfig->foreach_volume($remote_conf, sub {
805 my ($ds, $drive) = @_;
806
807 # TODO eject CDROM?
808 return if PVE::QemuServer::drive_is_cdrom($drive);
809
810 my $volid = $drive->{file};
811 return if !$volid;
812
813 return if !grep { $_ eq $volid} @online_local_volumes;
814
815 my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
816 my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
817 my $source_format = PVE::QemuServer::qemu_img_format($scfg, $volname);
818
819 # set by target cluster
820 my $oldvolid = delete $drive->{file};
821 delete $drive->{format};
822
823 my $targetsid = PVE::JSONSchema::map_id($storage_map, $storeid);
824
825 my $params = {
826 format => $source_format,
827 storage => $targetsid,
828 drive => $drive,
829 };
830
831 $self->log('info', "Allocating volume for drive '$ds' on remote storage '$targetsid'..");
832 my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'disk', $params);
833
834 $self->log('info', "volume '$oldvolid' is '$res->{volid}' on the target\n");
835 $remote_conf->{$ds} = $res->{drivestr};
836 $self->{nbd}->{$ds} = $res;
837 });
838
839 my $conf_str = PVE::QemuServer::write_vm_config("remote", $remote_conf);
840
841 # TODO expose in PVE::Firewall?
842 my $vm_fw_conf_path = "/etc/pve/firewall/$vmid.fw";
843 my $fw_conf_str;
844 $fw_conf_str = PVE::Tools::file_get_contents($vm_fw_conf_path)
845 if -e $vm_fw_conf_path;
846 my $params = {
847 conf => $conf_str,
848 'firewall-config' => $fw_conf_str,
849 };
850
851 PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'config', $params);
852}
853
16e903f2
DM
854sub phase1_cleanup {
855 my ($self, $vmid, $err) = @_;
856
857 $self->log('info', "aborting phase 1 - cleanup resources");
858
1858638f
DM
859 my $conf = $self->{vmconf};
860 delete $conf->{lock};
ffda963f 861 eval { PVE::QemuConfig->write_config($vmid, $conf) };
16e903f2
DM
862 if (my $err = $@) {
863 $self->log('err', $err);
864 }
f5eb281a 865
eb5751ba
FE
866 eval { $self->cleanup_remotedisks() };
867 if (my $err = $@) {
868 $self->log('err', $err);
16e903f2 869 }
9b6efe43
FG
870
871 eval { $self->cleanup_bitmaps() };
872 if (my $err =$@) {
873 $self->log('err', $err);
874 }
16e903f2
DM
875}
876
05b2a4ae
FG
877sub phase2_start_local_cluster {
878 my ($self, $vmid, $params) = @_;
1e3baf05 879
16e903f2 880 my $conf = $self->{vmconf};
c3417e3b 881 my $local_volumes = $self->{local_volumes};
efe0d457
FE
882 my @online_local_volumes = $self->filter_local_volumes('online');
883
05b2a4ae
FG
884 my $start = $params->{start_params};
885 my $migrate = $params->{migrate_opts};
16e903f2 886
46a84fd4 887 $self->log('info', "starting VM $vmid on remote node '$self->{node}'");
1e3baf05 888
05b2a4ae 889 my $tunnel_info = {};
7e8dcf2c 890
19672434 891 ## start on remote node
95a4b4a9
AD
892 my $cmd = [@{$self->{rem_ssh}}];
893
05b2a4ae
FG
894 push @$cmd, 'qm', 'start', $vmid;
895
896 if ($start->{skiplock}) {
897 push @$cmd, '--skiplock';
95a4b4a9
AD
898 }
899
05b2a4ae 900 push @$cmd, '--migratedfrom', $migrate->{migratedfrom};
1c9d54bf 901
05b2a4ae 902 push @$cmd, '--migration_type', $migrate->{type};
2de2d6f7 903
05b2a4ae
FG
904 push @$cmd, '--migration_network', $migrate->{network}
905 if $migrate->{network};
2de2d6f7 906
05b2a4ae 907 push @$cmd, '--stateuri', $start->{statefile};
2de2d6f7 908
05b2a4ae
FG
909 if ($start->{forcemachine}) {
910 push @$cmd, '--machine', $start->{forcemachine};
42668529
DM
911 }
912
05b2a4ae
FG
913 if ($start->{forcecpu}) {
914 push @$cmd, '--force-cpu', $start->{forcecpu};
58c64ad5
SR
915 }
916
efe0d457 917 if ($self->{storage_migration}) {
4530494b 918 push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
b74cad8a
AD
919 }
920
86b8228b 921 my $spice_port;
05b2a4ae 922 my $input = "nbd_protocol_version: $migrate->{nbd_proto_version}\n";
fd95d780 923
13d121d7
FE
924 my @offline_local_volumes = $self->filter_local_volumes('offline');
925 for my $volid (@offline_local_volumes) {
926 my $drivename = $local_volumes->{$volid}->{drivename};
927 next if !$drivename || !$conf->{$drivename};
928
929 my $new_volid = $self->{volume_map}->{$volid};
930 next if !$new_volid || $volid eq $new_volid;
931
932 # FIXME PVE 8.x only use offline_volume variant once all targets can handle it
933 if ($drivename eq 'tpmstate0') {
934 $input .= "$drivename: $new_volid\n"
935 } else {
936 $input .= "offline_volume: $drivename: $new_volid\n"
937 }
fd95d780
FG
938 }
939
05b2a4ae 940 $input .= "spice_ticket: $migrate->{spice_ticket}\n" if $migrate->{spice_ticket};
cee620e6 941
4b26ffbf
FE
942 my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
943 foreach my $volid (@online_replicated_volumes) {
efe0d457 944 $input .= "replicated_volume: $volid\n";
88126be3
FG
945 }
946
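 # At this point $input holds the key/value lines piped to the remote 'qm start'
 # via STDIN; an assumed example (values made up) could look like:
 #   nbd_protocol_version: 1
 #   tpmstate0: local-lvm:vm-100-disk-1
 #   offline_volume: scsi1: local-lvm:vm-100-disk-2
 #   spice_ticket: <ticket>
 #   replicated_volume: local-zfs:vm-100-disk-0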
efbbe59d
FE
947 my $handle_storage_migration_listens = sub {
948 my ($drive_key, $drivestr, $nbd_uri) = @_;
949
950 $self->{stopnbd} = 1;
951 $self->{target_drive}->{$drive_key}->{drivestr} = $drivestr;
952 $self->{target_drive}->{$drive_key}->{nbd_uri} = $nbd_uri;
953
954 my $source_drive = PVE::QemuServer::parse_drive($drive_key, $conf->{$drive_key});
955 my $target_drive = PVE::QemuServer::parse_drive($drive_key, $drivestr);
956 my $source_volid = $source_drive->{file};
957 my $target_volid = $target_drive->{file};
958
959 $self->{volume_map}->{$source_volid} = $target_volid;
960 $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
961 };
962
88126be3 963 my $target_replicated_volumes = {};
86b8228b 964
7c14dcae
DM
965 # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
966 # instead we pipe it through STDIN
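 # The outfunc below parses the remote 'qm start' output line by line; roughly
 # (paths, ports and volids made up for illustration) it matches lines such as:
 #   migration listens on unix:/run/qemu-server/100.migrate
 #   migration listens on tcp:localhost:60000
 #   spice listens on port 61000
 #   storage migration listens on nbd:unix:/run/qemu-server/100_nbd.migrate:exportname=drive-scsi0 volume:local-lvm:vm-100-disk-0
 #   re-using replicated volume: scsi1 - local-zfs:vm-100-disk-1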
7827de41 967 my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
1e3baf05
DM
968 my $line = shift;
969
05b2a4ae
FG
970 if ($line =~ m/^migration listens on (tcp):(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
971 $tunnel_info->{addr} = $2;
972 $tunnel_info->{port} = int($3);
973 $tunnel_info->{proto} = $1;
1c9d54bf 974 }
05b2a4ae
FG
975 elsif ($line =~ m!^migration listens on (unix):(/run/qemu-server/(\d+)\.migrate)$!) {
976 $tunnel_info->{addr} = $2;
977 die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $3;
978 $tunnel_info->{proto} = $1;
5bc1e039
SP
979 }
980 elsif ($line =~ m/^migration listens on port (\d+)$/) {
05b2a4ae
FG
981 $tunnel_info->{addr} = "localhost";
982 $tunnel_info->{port} = int($1);
983 $tunnel_info->{proto} = "tcp";
5bc1e039 984 }
f3a483b6 985 elsif ($line =~ m/^spice listens on port (\d+)$/) {
86b8228b 986 $spice_port = int($1);
1e3baf05 987 }
769f187d 988 elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
8b02e568 989 my $drivestr = $4;
b74cad8a
AD
990 my $nbd_uri = "nbd:$1:$2:exportname=$3";
991 my $targetdrive = $3;
992 $targetdrive =~ s/drive-//g;
993
efbbe59d 994 $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
7827de41
ML
995 } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
996 my $drivestr = $4;
997 die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
998 my $nbd_unix_addr = $1;
999 my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
1000 my $targetdrive = $3;
1001 $targetdrive =~ s/drive-//g;
b74cad8a 1002
efbbe59d 1003 $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
05b2a4ae 1004 $tunnel_info->{unix_sockets}->{$nbd_unix_addr} = 1;
88126be3
FG
1005 } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
1006 my $drive = $1;
1007 my $volid = $2;
1008 $target_replicated_volumes->{$volid} = $drive;
8bf30c2a
SR
1009 } elsif ($line =~ m/^QEMU: (.*)$/) {
1010 $self->log('info', "[$self->{node}] $1\n");
b74cad8a 1011 }
ab399b7c
AD
1012 }, errfunc => sub {
1013 my $line = shift;
8bf30c2a 1014 $self->log('info', "[$self->{node}] $line");
6e0216d8
SR
1015 }, noerr => 1);
1016
1017 die "remote command failed with exit code $exitcode\n" if $exitcode;
1e3baf05 1018
05b2a4ae 1019 die "unable to detect remote migration address\n" if !$tunnel_info->{addr} || !$tunnel_info->{proto};
1ef75254 1020
4b26ffbf 1021 if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
88126be3
FG
1022 die "number of replicated disks on source and target node do not match - target node too old?\n"
1023 }
1024
05b2a4ae
FG
1025 return ($tunnel_info, $spice_port);
1026}
1027
eef93bc5
FG
1028sub phase2_start_remote_cluster {
1029 my ($self, $vmid, $params) = @_;
1030
1031 die "insecure migration to remote cluster not implemented\n"
1032 if $params->{migrate_opts}->{type} ne 'websocket';
1033
1034 my $remote_vmid = $self->{opts}->{remote}->{vmid};
1035
1036 # like regular start but with some overhead accounted for
7f8c8087
AD
1037 my $memory = get_current_memory($self->{vmconf}->{memory});
1038 my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}, $memory) + 10;
eef93bc5
FG
1039
1040 my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params);
1041
1042 foreach my $drive (keys %{$res->{drives}}) {
1043 $self->{stopnbd} = 1;
1044 $self->{target_drive}->{$drive}->{drivestr} = $res->{drives}->{$drive}->{drivestr};
1045 my $nbd_uri = $res->{drives}->{$drive}->{nbd_uri};
1046 die "unexpected NBD uri for '$drive': $nbd_uri\n"
1047 if $nbd_uri !~ s!/run/qemu-server/$remote_vmid\_!/run/qemu-server/$vmid\_!;
1048
1049 $self->{target_drive}->{$drive}->{nbd_uri} = $nbd_uri;
1050 }
1051
1052 return ($res->{migrate}, $res->{spice_port});
1053}
1054
05b2a4ae
FG
1055sub phase2 {
1056 my ($self, $vmid) = @_;
1057
1058 my $conf = $self->{vmconf};
eef93bc5 1059 my $local_volumes = $self->{local_volumes};
05b2a4ae
FG
1060
1061 # version > 0 for unix socket support
1062 my $nbd_protocol_version = 1;
1063
1064 my $spice_ticket;
1065 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
1066 my $res = mon_cmd($vmid, 'query-spice');
1067 $spice_ticket = $res->{ticket};
1068 }
1069
1070 my $migration_type = $self->{opts}->{migration_type};
1071 my $state_uri = $migration_type eq 'insecure' ? 'tcp' : 'unix';
1072
1073 my $params = {
1074 start_params => {
1075 statefile => $state_uri,
1076 forcemachine => $self->{forcemachine},
1077 forcecpu => $self->{forcecpu},
1078 skiplock => 1,
1079 },
1080 migrate_opts => {
1081 spice_ticket => $spice_ticket,
1082 type => $migration_type,
1083 network => $self->{opts}->{migration_network},
1084 storagemap => $self->{opts}->{storagemap},
1085 migratedfrom => PVE::INotify::nodename(),
1086 nbd_proto_version => $nbd_protocol_version,
1087 nbd => $self->{nbd},
1088 },
1089 };
1090
eef93bc5 1091 my ($tunnel_info, $spice_port);
05b2a4ae 1092
eef93bc5
FG
1093 my @online_local_volumes = $self->filter_local_volumes('online');
1094 $self->{storage_migration} = 1 if scalar(@online_local_volumes);
1095
1096 if (my $remote = $self->{opts}->{remote}) {
1097 my $remote_vmid = $remote->{vmid};
1098 $params->{migrate_opts}->{remote_node} = $self->{node};
1099 ($tunnel_info, $spice_port) = $self->phase2_start_remote_cluster($vmid, $params);
1100 die "only UNIX sockets are supported for remote migration\n"
1101 if $tunnel_info->{proto} ne 'unix';
1102
1103 my $remote_socket = $tunnel_info->{addr};
1104 my $local_socket = $remote_socket;
1105 $local_socket =~ s/$remote_vmid/$vmid/g;
1106 $tunnel_info->{addr} = $local_socket;
1107
1108 $self->log('info', "Setting up tunnel for '$local_socket'");
1109 PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);
1110
1111 foreach my $remote_socket (@{$tunnel_info->{unix_sockets}}) {
1112 my $local_socket = $remote_socket;
1113 $local_socket =~ s/$remote_vmid/$vmid/g;
1114 next if $self->{tunnel}->{forwarded}->{$local_socket};
1115 $self->log('info', "Setting up tunnel for '$local_socket'");
1116 PVE::Tunnel::forward_unix_socket($self->{tunnel}, $local_socket, $remote_socket);
1117 }
1118 } else {
1119 ($tunnel_info, $spice_port) = $self->phase2_start_local_cluster($vmid, $params);
1120
1121 $self->log('info', "start remote tunnel");
1122 $self->start_remote_tunnel($tunnel_info);
1123 }
05b2a4ae
FG
1124
1125 my $migrate_uri = "$tunnel_info->{proto}:$tunnel_info->{addr}";
1126 $migrate_uri .= ":$tunnel_info->{port}"
1127 if defined($tunnel_info->{port});
d296ed08 1128
efe0d457 1129 if ($self->{storage_migration}) {
b74cad8a
AD
1130 $self->{storage_migration_jobs} = {};
1131 $self->log('info', "starting storage migration");
1132
bd2d5fe6 1133 die "The number of local disks does not match between the source and the destination.\n"
efe0d457 1134 if (scalar(keys %{$self->{target_drive}}) != scalar(@online_local_volumes));
b74cad8a 1135 foreach my $drive (keys %{$self->{target_drive}}){
d189e590
SI
1136 my $target = $self->{target_drive}->{$drive};
1137 my $nbd_uri = $target->{nbd_uri};
683ab654 1138
1764fa05 1139 my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
97ece9dd 1140 my $source_volid = $source_drive->{file};
97ece9dd 1141
05b2a4ae 1142 my $bwlimit = $self->{local_volumes}->{$source_volid}->{bwlimit};
9b6efe43 1143 my $bitmap = $target->{bitmap};
d189e590 1144
d108cb1e 1145 $self->log('info', "$drive: start migration to $nbd_uri");
9b6efe43 1146 PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
b74cad8a
AD
1147 }
1148 }
1149
05b2a4ae 1150 $self->log('info', "starting online/live migration on $migrate_uri");
5bc1e039 1151 $self->{livemigration} = 1;
e18b0b99 1152
3beb415b
AD
1153 # load_defaults
1154 my $defaults = PVE::QemuServer::load_defaults();
1155
7de328c6
TL
1156 $self->log('info', "set migration capabilities");
1157 eval { PVE::QemuServer::set_migration_caps($vmid) };
485449e3
SR
1158 warn $@ if $@;
1159
1160 my $qemu_migrate_params = {};
1161
ddd664d7
SI
1162 # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
1163 # migrate_speed parameter in qm.conf - take the lower of the two.
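 # Worked example with assumed values: a datacenter/API bwlimit of 51200 KB/s and
 # migrate_speed=100 (MB/s) in the VM config compare as 51200 vs 100*1024=102400 KB/s,
 # so 51200 KB/s wins; below it is converted to 51200*1024 = 52428800 B/s for QMP.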
eef93bc5
FG
1164 my $bwlimit = $self->get_bwlimit();
1165
2c4ba4c3 1166 my $migrate_speed = $conf->{migrate_speed} // 0;
8f43ac48 1167 $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s
ddd664d7 1168
2c4ba4c3
FE
1169 if ($bwlimit && $migrate_speed) {
1170 $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
1171 } else {
1172 $migrate_speed ||= $bwlimit;
1173 }
a89bd100 1174 $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;
ddd664d7 1175
a89bd100
TL
1176 if ($migrate_speed) {
1177 $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
0fca250a 1178 $self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");
8f43ac48
TL
1179 } else {
 1180 # QEMU's default migrate speed is only 128 MiB/s == 1 Gbps, so use 16 GiB/s == 128 Gbps instead
1181 $migrate_speed = (16 << 30);
a89bd100 1182 }
8f43ac48 1183 $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);
3beb415b
AD
1184
1185 my $migrate_downtime = $defaults->{migrate_downtime};
1186 $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
c05f1b33
SR
1187 # migrate-set-parameters expects limit in ms
1188 $migrate_downtime *= 1000;
1189 $self->log('info', "migration downtime limit: $migrate_downtime ms");
1190 $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
3beb415b 1191
171ed95c 1192 # set cachesize to 10% of the total memory
7f8c8087 1193 my $memory = get_current_memory($conf->{memory});
171ed95c 1194 my $cachesize = int($memory * 1048576 / 10);
50d8dd5d
AD
1195 $cachesize = round_powerof2($cachesize);
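 # e.g. (assumed config) 4096 MiB of memory: int(4096 * 1048576 / 10) == 429496729,
 # rounded up to the next power of two == 536870912 bytes (512 MiB) of xbzrle cache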
1196
0fca250a 1197 $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
485449e3
SR
1198 $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);
1199
1200 $self->log('info', "set migration parameters");
e18b0b99 1201 eval {
485449e3 1202 mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
e18b0b99 1203 };
485449e3 1204 $self->log('info', "migrate-set-parameters error: $@") if $@;
f34d1466 1205
eef93bc5 1206 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
95a4b4a9
AD
1207 my $rpcenv = PVE::RPCEnvironment::get();
1208 my $authuser = $rpcenv->get_user();
1209
86b8228b 1210 my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
95a4b4a9 1211
86b8228b 1212 my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
769f187d 1213 my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
95a4b4a9
AD
1214
1215 $self->log('info', "spice client_migrate_info");
1216
1217 eval {
0a13e08e 1218 mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
ccab68c2 1219 hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
86b8228b 1220 'cert-subject' => $subject);
95a4b4a9
AD
1221 };
1222 $self->log('info', "client_migrate_info error: $@") if $@;
1223
1224 }
1225
9938d24d
FE
1226 my $start = time();
1227
05b2a4ae 1228 $self->log('info', "start migrate command to $migrate_uri");
5a7835f5 1229 eval {
05b2a4ae 1230 mon_cmd($vmid, "migrate", uri => $migrate_uri);
5a7835f5
AD
1231 };
1232 my $merr = $@;
05b2a4ae 1233 $self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;
1e3baf05 1234
e693c491 1235 my $last_mem_transferred = 0;
4305207d 1236 my $usleep = 1000000;
e52bd94c 1237 my $i = 0;
b0b756c1 1238 my $err_count = 0;
865ef132
SP
1239 my $lastrem = undef;
1240 my $downtimecounter = 0;
1e3baf05 1241 while (1) {
e52bd94c 1242 $i++;
e693c491 1243 my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;
e52bd94c 1244
b0b756c1 1245 usleep($usleep);
6539865a
TL
1246
1247 my $stat = eval { mon_cmd($vmid, "query-migrate") };
b0b756c1
DM
1248 if (my $err = $@) {
1249 $err_count++;
1250 warn "query migrate failed: $err\n";
f34d1466 1251 $self->log('info', "query migrate failed: $err");
b0b756c1 1252 if ($err_count <= 5) {
6539865a 1253 usleep(1_000_000);
b0b756c1
DM
1254 next;
1255 }
1256 die "too many query migrate failures - aborting\n";
1257 }
985a5f48 1258
6539865a
TL
1259 my $status = $stat->{status};
1260 if (defined($status) && $status =~ m/^(setup)$/im) {
1261 sleep(1);
1262 next;
1263 }
f5eb281a 1264
6539865a
TL
1265 if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
1266 die $merr if $merr;
1267 die "unable to parse migration status '$status' - aborting\n";
1268 }
1269 $merr = undef;
1270 $err_count = 0;
1271
e693c491
TL
1272 my $memstat = $stat->{ram};
1273
6539865a
TL
1274 if ($status eq 'completed') {
1275 my $delay = time() - $start;
1276 if ($delay > 0) {
0fca250a
TL
1277 my $total = $memstat->{total} || 0;
1278 my $avg_speed = render_bytes($total / $delay, 1);
6539865a 1279 my $downtime = $stat->{downtime} || 0;
0fca250a 1280 $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
1e3baf05 1281 }
6539865a 1282 }
1e3baf05 1283
6539865a 1284 if ($status eq 'failed' || $status eq 'cancelled') {
55d07411
FE
1285 my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
1286 $self->log('info', "migration status error: $message");
6539865a
TL
1287 die "aborting\n"
1288 }
a05b47a8 1289
6539865a
TL
1290 if ($status ne 'active') {
1291 $self->log('info', "migration status: $status");
1292 last;
1293 }
2e787b18 1294
e693c491
TL
1295 if ($memstat->{transferred} ne $last_mem_transferred) {
1296 my $trans = $memstat->{transferred} || 0;
1297 my $rem = $memstat->{remaining} || 0;
1298 my $total = $memstat->{total} || 0;
0fca250a
TL
1299 my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
1300 my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);
a05b47a8 1301
6539865a
TL
 1302 # reduce sleep if remaining memory is lower than the average transfer speed
1303 $usleep = 100_000 if $avglstat && $rem < $avglstat;
865ef132 1304
b68a957b
TL
 1305 # also reduce logging if we poll more frequently
1306 my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;
370b05e7 1307
0fca250a
TL
1308 my $total_h = render_bytes($total, 1);
1309 my $transferred_h = render_bytes($trans, 1);
1310 my $speed_h = render_bytes($speed, 1);
1311
1312 my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";
1313
1314 if ($dirty_rate > $speed) {
1315 my $dirty_rate_h = render_bytes($dirty_rate, 1);
1316 $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
1317 }
1318
b68a957b 1319 $self->log('info', "migration $status, $progress") if $should_log;
0fca250a
TL
1320
1321 my $xbzrle = $stat->{"xbzrle-cache"} || {};
1322 my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
1323 if ($xbzrlebytes || $xbzrlepages) {
1324 my $bytes_h = render_bytes($xbzrlebytes, 1);
1325
1326 my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";
1327
1328 $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
1329 if $xbzrle->{'cache-miss-rate'};
1330
1331 $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};
1332
b68a957b 1333 $self->log('info', "xbzrle: $msg") if $should_log;
6539865a
TL
1334 }
1335
e693c491 1336 if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
6539865a
TL
1337 $downtimecounter++;
1338 }
1339 $lastrem = $rem;
1340
1341 if ($downtimecounter > 5) {
1342 $downtimecounter = 0;
1343 $migrate_downtime *= 2;
1344 $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
1345 eval {
1346 # migrate-set-parameters does not touch values not
1347 # specified, so this only changes downtime-limit
1348 mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
1349 };
1350 $self->log('info', "migrate-set-parameters error: $@") if $@;
1351 }
1e3baf05 1352 }
6539865a 1353
e693c491 1354 $last_mem_transferred = $memstat->{transferred};
a05b47a8 1355 }
0783c3c2
FE
1356
1357 if ($self->{storage_migration}) {
1358 # finish block-job with block-job-cancel, to disconnect source VM from NBD
1359 # to avoid it trying to re-establish it. We are in blockjob ready state,
 1360 # thus, this command changes it to blockjob complete (see qapi docs)
1361 eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
1362 if (my $err = $@) {
1363 die "Failed to complete storage migration: $err\n";
1364 }
1365 }
1e3baf05 1366}
16e903f2 1367
c04b5b04
AD
1368sub phase2_cleanup {
1369 my ($self, $vmid, $err) = @_;
1370
af30308f
DM
1371 return if !$self->{errors};
1372 $self->{phase2errors} = 1;
1373
c04b5b04
AD
1374 $self->log('info', "aborting phase 2 - cleanup resources");
1375
19168b91
SP
1376 $self->log('info', "migrate_cancel");
1377 eval {
0a13e08e 1378 mon_cmd($vmid, "migrate_cancel");
19168b91
SP
1379 };
1380 $self->log('info', "migrate_cancel error: $@") if $@;
1381
8a0d269b
FE
1382 my $vm_status = eval {
1383 mon_cmd($vmid, 'query-status')->{status} or die "no 'status' in result\n";
1384 };
1385 $self->log('err', "query-status error: $@") if $@;
1386
1387 # Can end up in POSTMIGRATE state if failure occurred after convergence. Try going back to
1388 # original state. Unfortunately, direct transition from POSTMIGRATE to PAUSED is not possible.
1389 if ($vm_status && $vm_status eq 'postmigrate') {
1390 if (!$self->{vm_was_paused}) {
1391 eval { mon_cmd($vmid, 'cont'); };
1392 $self->log('err', "resuming VM failed: $@") if $@;
1393 } else {
1394 $self->log('err', "VM was paused, but ended in postmigrate state");
1395 }
1396 }
1397
c04b5b04
AD
1398 my $conf = $self->{vmconf};
1399 delete $conf->{lock};
ffda963f 1400 eval { PVE::QemuConfig->write_config($vmid, $conf) };
c04b5b04
AD
1401 if (my $err = $@) {
1402 $self->log('err', $err);
1403 }
1404
af30308f 1405 # cleanup resources on target host
3b4cf0f0 1406 if ($self->{storage_migration}) {
b74cad8a
AD
1407 eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
1408 if (my $err = $@) {
1409 $self->log('err', $err);
1410 }
9b3f5a5c 1411 }
b74cad8a 1412
9b3f5a5c
FG
1413 eval { $self->cleanup_bitmaps() };
1414 if (my $err =$@) {
1415 $self->log('err', $err);
b74cad8a
AD
1416 }
1417
af30308f 1418 my $nodename = PVE::INotify::nodename();
370b05e7 1419
eef93bc5
FG
1420 if ($self->{tunnel} && $self->{tunnel}->{version} >= 2) {
1421 PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'stop');
1422 } else {
1423 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
1424 eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
1425 if (my $err = $@) {
1426 $self->log('err', $err);
1427 $self->{errors} = 1;
1428 }
af30308f 1429 }
386c6ba7 1430
9b3f5a5c
FG
1431 # cleanup after stopping, otherwise disks might be in-use by target VM!
1432 eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
1433 if (my $err = $@) {
1434 $self->log('err', $err);
1435 }
1436
1437
386c6ba7 1438 if ($self->{tunnel}) {
e594231b 1439 eval { PVE::Tunnel::finish_tunnel($self->{tunnel}); };
386c6ba7
WL
1440 if (my $err = $@) {
1441 $self->log('err', $err);
1442 $self->{errors} = 1;
1443 }
1444 }
c04b5b04
AD
1445}
1446
16e903f2
DM
1447sub phase3 {
1448 my ($self, $vmid) = @_;
f5eb281a 1449
ad8b9d5e 1450 return;
16e903f2
DM
1451}
1452
1453sub phase3_cleanup {
1454 my ($self, $vmid, $err) = @_;
1455
1456 my $conf = $self->{vmconf};
af30308f 1457 return if $self->{phase2errors};
16e903f2 1458
1d5aaa1d
FG
1459 my $tunnel = $self->{tunnel};
1460
629923d0
HD
1461 my $sourcevollist = PVE::QemuServer::get_vm_volumes($conf);
1462
eef93bc5 1463 if ($self->{volume_map} && !$self->{opts}->{remote}) {
38311a1d
TL
1464 my $target_drives = $self->{target_drive};
1465
1466 # FIXME: for NBD storage migration we now only update the volid, and
 1467 # not the full drivestr from the target node. Work around that until we
 1468 # have a real rescan, to avoid things like a wrong format in the drive
1469 delete $conf->{$_} for keys %$target_drives;
97ece9dd 1470 PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});
38311a1d
TL
1471
1472 for my $drive (keys %$target_drives) {
1473 $conf->{$drive} = $target_drives->{$drive}->{drivestr};
1474 }
37666e4c
FE
1475 PVE::QemuConfig->write_config($vmid, $conf);
1476 }
1477
dbc9420b 1478 # transfer replication state before move config
eef93bc5
FG
1479 if (!$self->{opts}->{remote}) {
1480 $self->transfer_replication_state() if $self->{is_replicated};
1481 PVE::QemuConfig->move_config_to_node($vmid, $self->{node});
1482 $self->switch_replication_job_target() if $self->{is_replicated};
1483 }
dbc9420b 1484
5bc1e039 1485 if ($self->{livemigration}) {
3e802221
TL
1486 if ($self->{stopnbd}) {
1487 $self->log('info', "stopping NBD storage migration server on target.");
504105c6 1488 # stop nbd server on remote vm - requirement for resume since 2.9
eef93bc5 1489 if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 2) {
c2f44820
AD
1490 eval {
1491 PVE::Tunnel::write_tunnel($tunnel, 30, 'nbdstop');
1492 };
1493 if (my $err = $@) {
1494 $self->log('err', $err);
1495 $self->{errors} = 1;
1496 }
eef93bc5
FG
1497 } else {
1498 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
504105c6 1499
eef93bc5
FG
1500 eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
1501 if (my $err = $@) {
1502 $self->log('err', $err);
1503 $self->{errors} = 1;
1504 }
504105c6
FG
1505 }
1506 }
1d5aaa1d 1507
73ed6496
AD
 1508 # delete local FDB entries if learning is disabled; they'll be re-added on the target on resume
1509 PVE::QemuServer::del_nets_bridge_fdb($conf, $vmid);
1510
a183576e
FE
1511 if (!$self->{vm_was_paused}) {
1512 # config moved and nbd server stopped - now we can resume vm on target
1513 if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
eef93bc5 1514 my $cmd = $tunnel->{version} == 1 ? "resume $vmid" : "resume";
a183576e 1515 eval {
eef93bc5 1516 PVE::Tunnel::write_tunnel($tunnel, 30, $cmd);
a183576e
FE
1517 };
1518 if (my $err = $@) {
1519 $self->log('err', $err);
1520 $self->{errors} = 1;
1521 }
1522 } else {
a20dc58a 1523 # nocheck in case target node hasn't processed the config move/rename yet
a183576e
FE
1524 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
1525 my $logf = sub {
1526 my $line = shift;
1527 $self->log('err', $line);
1528 };
1529 eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
1530 if (my $err = $@) {
1531 $self->log('err', $err);
1532 $self->{errors} = 1;
1533 }
1d5aaa1d 1534 }
0028391f 1535 }
ca662131 1536
0028391f
FE
1537 if (
1538 $self->{storage_migration}
1539 && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}
1540 && $self->{running}
1541 ) {
1542 if (!$self->{vm_was_paused}) {
1543 $self->log('info', "issuing guest fstrim");
eef93bc5
FG
1544 if ($self->{opts}->{remote}) {
1545 PVE::Tunnel::write_tunnel($self->{tunnel}, 600, 'fstrim');
1546 } else {
1547 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
1548 eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
1549 if (my $err = $@) {
1550 $self->log('err', "fstrim failed - $err");
1551 $self->{errors} = 1;
1552 }
0028391f
FE
1553 }
1554 } else {
1555 $self->log('info', "skipping guest fstrim, because VM is paused");
a183576e 1556 }
ca662131 1557 }
b67900f1
AD
1558 }
1559
2e7fee87 1560 # close tunnel on successful migration; on error, phase2_cleanup already closed it
eef93bc5 1561 if ($tunnel && $tunnel->{version} == 1) {
e594231b 1562 eval { PVE::Tunnel::finish_tunnel($tunnel); };
2e7fee87
FG
1563 if (my $err = $@) {
1564 $self->log('err', $err);
1565 $self->{errors} = 1;
1566 }
eef93bc5
FG
1567 $tunnel = undef;
1568 delete $self->{tunnel};
2e7fee87
FG
1569 }
1570
fd8469f7 1571 eval {
fd8469f7
AD
1572 my $timer = 0;
1573 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
1574 $self->log('info', "Waiting for spice server migration");
1575 while (1) {
0a13e08e 1576 my $res = mon_cmd($vmid, 'query-spice');
fd8469f7
AD
1577 last if int($res->{'migrated'}) == 1;
1578 last if $timer > 50;
1579 $timer ++;
1580 usleep(200000);
769f187d 1581 }
fd8469f7
AD
1582 }
1583 };
95a4b4a9 1584
a20dc58a 1585 # always stop local VM with nocheck, since config is moved already
16e903f2
DM
1586 eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
1587 if (my $err = $@) {
1588 $self->log('err', "stopping vm failed - $err");
1589 $self->{errors} = 1;
1590 }
1591
 1592 # always deactivate volumes - avoid LVM LVs being active on several nodes
1593 eval {
629923d0 1594 PVE::Storage::deactivate_volumes($self->{storecfg}, $sourcevollist);
16e903f2
DM
1595 };
1596 if (my $err = $@) {
1597 $self->log('err', $err);
1598 $self->{errors} = 1;
1599 }
1600
4b26ffbf 1601 my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);
9b6efe43 1602
4b26ffbf
FE
1603 # destroy local copies
1604 foreach my $volid (@not_replicated_volumes) {
eef93bc5
FG
1605 # remote is cleaned up below
1606 next if $self->{opts}->{remote};
1607
ad8b9d5e
FE
1608 eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
1609 if (my $err = $@) {
1610 $self->log('err', "removing local copy of '$volid' failed - $err");
1611 $self->{errors} = 1;
1612 last if $err =~ /^interrupted by signal$/;
b74cad8a 1613 }
b74cad8a
AD
1614 }
1615
16e903f2 1616 # clear migrate lock
eef93bc5
FG
1617 if ($tunnel && $tunnel->{version} >= 2) {
1618 PVE::Tunnel::write_tunnel($tunnel, 10, "unlock");
1619
1620 PVE::Tunnel::finish_tunnel($tunnel);
1621 } else {
1622 my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
1623 $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
1624 }
1625
1626 if ($self->{opts}->{remote} && $self->{opts}->{delete}) {
1627 eval { PVE::QemuServer::destroy_vm($self->{storecfg}, $vmid, 1, undef, 0) };
1628 warn "Failed to remove source VM - $@\n" if $@;
1629 }
16e903f2
DM
1630}
1631
1632sub final_cleanup {
1633 my ($self, $vmid) = @_;
1634
1635 # nothing to do
1636}
1637
50d8dd5d
AD
1638sub round_powerof2 {
1639 return 1 if $_[0] < 2;
1640 return 2 << int(log($_[0]-1)/log(2));
1641}
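# rounds up to the nearest power of two, e.g. round_powerof2(3) == 4,
# round_powerof2(8) == 8, round_powerof2(9) == 16 (used for the xbzrle cache size above)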
1642
16e903f2 16431;