package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw(WNOHANG);
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;     # used below via PVE::QemuConfig->load_config() and friends
use PVE::AccessControl;  # used below for the SPICE ticket helpers
use Time::HiRes qw(usleep);
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

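# This module implements the QEMU/KVM-specific half of VM migration on top
# of PVE::AbstractMigrate, which drives the methods below in order:
# prepare -> phase1 (sync local disks) -> phase2 (live migration) ->
# phase3/phase3_cleanup (remove local copies, move config, resume) ->
# final_cleanup. A minimal usage sketch (signature assumed from
# PVE::AbstractMigrate, not defined in this file):
#
#   PVE::QemuMigrate->migrate($targetnode, $targetip, $vmid, { online => 1 });

# Fork the given command via open2() and return its stdin/stdout handles
# plus the child PID, so callers can talk to a long-running helper process
# (here: the ssh-based migration tunnel).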
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

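# Close both pipe handles and reap the child: poll with waitpid() for up to
# $timeout seconds, then escalate to SIGTERM and finally SIGKILL.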
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            # note: fork_command_pipe() stores the PID under the 'pid' key
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

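# Start the 'qm mtunnel' helper on the target node over ssh, optionally with
# a port/socket forward (-L), and wait for its "tunnel online" greeting.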
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

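# Ask the remote 'qm mtunnel' to quit, tear down the ssh child, and remove a
# possibly left-over UNIX socket on both the local and the remote side.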
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the forwarded socket on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, also check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

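# Pre-flight checks run before phase1: the VM must exist and be unlocked,
# all storages must be available on both nodes, and the ssh connection to
# the target must work. Returns the PID of the running VM (or 0), which the
# PVE::AbstractMigrate base class stores as $self->{running}.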
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        # use the local $running here - $self->{running} is only set by the
        # base class after prepare() returns
        if ($running || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [@{$self->{rem_ssh}}, '/bin/true'];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

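# Collect all local (non-shared) volumes of the VM - both referenced and
# unused ones found via vdisk_list() - verify they can be migrated (storage
# type dir, zfspool, lvm or lvmthin; no linked clones), and copy them to the
# target node with PVE::Storage::storage_migrate().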
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'\n";
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

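# Phase 1: set the 'migrate' lock in the VM config and copy all local disk
# images to the target node while the source config still exists.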
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

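# Phase 1 failed: drop the migrate lock again and report any volume copies
# that may already have reached the target node.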
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

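# Phase 2: start the VM paused on the target node ('qm start --migratedfrom'),
# parse the migration listen address from its output, set up the ssh tunnel
# for secure migrations, tune the migration parameters, and run the actual
# live migration, polling 'query-migrate' until it completes or fails.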
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    # we use TCP only for insecure migrations, as ssh TCP forward tunnels often
    # appeared too late (they are hard, if not impossible, to check for).
    # Secure migrations use UNIX sockets now; this *breaks* compatibility when
    # migrating from new to old, but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
    my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;

    if (!$secure_migration) {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass via command line parameter);
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    if ($secure_migration) {
        $self->log('info', "start remote tunnel");

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready\n";
                }

                usleep(50000);
            }

        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    }

    my $start = time();
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m, which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

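    # Poll 'query-migrate' every $usleep microseconds and log progress. If the
    # amount of remaining RAM stops shrinking, the allowed downtime is doubled
    # so that the migration can eventually converge.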
    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        # 'my $var = ... if ...' has undefined behavior in Perl, so compute
        # the average explicitly
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average
                # amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # just to be sure that the tunnel gets closed on successful migration; on
    # error, phase2_cleanup closes it *after* stopping the remote waiting VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

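# Phase 2 failed: cancel the migration on the source, drop the migrate lock,
# stop the (still waiting) VM on the target node, and close the tunnel.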
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

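# Phase 3: the migration itself succeeded, so remove the now stale local
# copies of all migrated volumes.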
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

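# Final hand-over: move the config file to the target node (which transfers
# ownership of the VM), resume it there if this was a live migration, wait
# for a possible SPICE client migration, then stop the local VM, deactivate
# its volumes, and clear the migrate lock on the target.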
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file is moved, we can resume the VM on the target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval { PVE::Tools::run_command($cmd, outfunc => sub {},
            errfunc => sub {
                my $line = shift;
                $self->log('err', $line);
            });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'unlock', $vmid];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;