1 package PVE::QemuMigrate;
2
3 use strict;
4 use warnings;
5 use PVE::AbstractMigrate;
6 use IO::File;
7 use IPC::Open2;
8 use POSIX qw( WNOHANG );
9 use PVE::INotify;
10 use PVE::Tools;
11 use PVE::Cluster;
12 use PVE::Storage;
13 use PVE::QemuServer;
14 use Time::HiRes qw( usleep );
15 use PVE::RPCEnvironment;
16
17 use base qw(PVE::AbstractMigrate);
18
19 sub fork_command_pipe {
20 my ($self, $cmd) = @_;
21
22 my $reader = IO::File->new();
23 my $writer = IO::File->new();
24
25 my $orig_pid = $$;
26
27 my $cpid;
28
29 eval { $cpid = open2($reader, $writer, @$cmd); };
30
31 my $err = $@;
32
33 # catch exec errors
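# open2() forks before exec'ing; if the exec itself fails, the error is
# raised in the forked child, so a mismatched PID means we are that child
# and must exit here instead of unwinding the parent's call stack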
34 if ($orig_pid != $$) {
35 $self->log('err', "can't fork command pipe\n");
36 POSIX::_exit(1);
37 kill('KILL', $$);
38 }
39
40 die $err if $err;
41
42 return { writer => $writer, reader => $reader, pid => $cpid };
43 }
44
45 sub finish_command_pipe {
46 my ($self, $cmdpipe, $timeout) = @_;
47
48 my $cpid = $cmdpipe->{pid};
49 return if !defined($cpid);
50
51 my $writer = $cmdpipe->{writer};
52 my $reader = $cmdpipe->{reader};
53
54 $writer->close();
55 $reader->close();
56
57 my $collect_child_process = sub {
58 my $res = waitpid($cpid, WNOHANG);
59 if (defined($res) && ($res == $cpid)) {
60 delete $cmdpipe->{pid};
61 return 1;
62 } else {
63 return 0;
64 }
65 };
66
67 if ($timeout) {
68 for (my $i = 0; $i < $timeout; $i++) {
69 return if &$collect_child_process();
70 sleep(1);
71 }
72 }
73
74 $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
75 kill(15, $cpid);
76
77 # wait again
78 for (my $i = 0; $i < 10; $i++) {
79 return if &$collect_child_process();
80 sleep(1);
81 }
82
83 $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
84 kill 9, $cpid;
85 sleep 1;
86
87 $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
88 if !&$collect_child_process();
89 }
90
91 sub fork_tunnel {
92 my ($self, $tunnel_addr) = @_;
93
94 my @localtunnelinfo = defined($tunnel_addr) ? ('-L' , $tunnel_addr ) : ();
95
96 my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel' ];
97
98 my $tunnel = $self->fork_command_pipe($cmd);
99
100 my $reader = $tunnel->{reader};
101
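# the remote 'qm mtunnel' prints a single status line once it is up;
# wait up to 60 seconds for it and accept only "tunnel online"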
102 my $helo;
103 eval {
104 PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
105 die "no reply\n" if !$helo;
106 die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
107 die "got strange reply from mtunnel ('$helo')\n"
108 if $helo !~ m/^tunnel online$/;
109 };
110 my $err = $@;
111
112 if ($err) {
113 $self->finish_command_pipe($tunnel);
114 die "can't open migration tunnel - $err";
115 }
116 return $tunnel;
117 }
118
119 sub finish_tunnel {
120 my ($self, $tunnel) = @_;
121
122 my $writer = $tunnel->{writer};
123
124 eval {
125 PVE::Tools::run_with_timeout(30, sub {
126 print $writer "quit\n";
127 $writer->flush();
128 });
129 };
130 my $err = $@;
131
132 $self->finish_command_pipe($tunnel, 30);
133
134 if ($tunnel->{sock_addr}) {
135 # ssh does not remove the forwarded socket on the local host
136 my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
137 PVE::Tools::run_command($cmd);
138
139 # ... and just to be sure, also remove it on the remote side
140 unshift @{$cmd}, @{$self->{rem_ssh}};
141 PVE::Tools::run_command($cmd);
142 }
143
144 die $err if $err;
145 }
146
147 sub lock_vm {
148 my ($self, $vmid, $code, @param) = @_;
149
150 return PVE::QemuConfig->lock_config($vmid, $code, @param);
151 }
152
153 sub prepare {
154 my ($self, $vmid) = @_;
155
156 my $online = $self->{opts}->{online};
157
158 $self->{storecfg} = PVE::Storage::config();
159
160 # test if VM exists
161 my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);
162
163 PVE::QemuConfig->check_lock($conf);
164
165 my $running = 0;
166 if (my $pid = PVE::QemuServer::check_running($vmid)) {
167 die "can't migrate running VM without --online\n" if !$online;
168 $running = $pid;
169
170 $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
171
172 }
173
174 if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
175 if ($self->{running} || !$self->{opts}->{force}) {
176 die "can't migrate VM which uses local devices\n";
177 } else {
178 $self->log('info', "migrating VM which uses local devices");
179 }
180 }
181
182 my $vollist = PVE::QemuServer::get_vm_volumes($conf);
183
184 my $need_activate = [];
185 foreach my $volid (@$vollist) {
186 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
187
188 # check if storage is available on both nodes
189 my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
190 PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
191
192 if ($scfg->{shared}) {
193 # PVE::Storage::activate_storage checks this for non-shared storages
194 my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
195 warn "Used shared storage '$sid' is not online on source node!\n"
196 if !$plugin->check_connection($sid, $scfg);
197 } else {
198 # only activate if not shared
199 push @$need_activate, $volid;
200 }
201 }
202
203 # activate volumes
204 PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);
205
206 # test ssh connection
207 my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
208 eval { $self->cmd_quiet($cmd); };
209 die "Can't connect to destination address using public key\n" if $@;
210
211 return $running;
212 }
213
214 sub sync_disks {
215 my ($self, $vmid) = @_;
216
217 $self->log('info', "copying disk images");
218
219 my $conf = $self->{vmconf};
220
221 $self->{volumes} = [];
222
223 my $res = [];
224
225 eval {
226
227 my $volhash = {};
228 my $cdromhash = {};
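# $volhash collects every local volume that needs to be copied,
# $cdromhash remembers cdrom volumes so local ones can be rejected below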
229
230 my $sharedvm = 1;
231
232 my @sids = PVE::Storage::storage_ids($self->{storecfg});
233 foreach my $storeid (@sids) {
234 my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
235 next if $scfg->{shared};
236 next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);
237
238 # get list from PVE::Storage (for unused volumes)
239 my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
240 PVE::Storage::foreach_volid($dl, sub {
241 my ($volid, $sid, $volname) = @_;
242
243 # check if storage is available on target node
244 PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
245
246 $volhash->{$volid} = 1;
247 $sharedvm = 0; # there is a non-shared disk
248 });
249 }
250
251 my $test_volid = sub {
252 my ($volid, $is_cdrom, $snapname) = @_;
253
254 return if !$volid;
255
256 die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;
257
258 if ($is_cdrom) {
259 die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
260 return if $volid eq 'none';
261 $cdromhash->{$volid} = 1;
262 }
263
264 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
265
266 # check if storage is available on both nodes
267 my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
268 PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
269
270 return if $scfg->{shared};
271
272 $sharedvm = 0;
273
274 die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};
275
276 my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);
277
278 die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
279 if !$owner || ($owner != $self->{vmid});
280
281 if (defined($snapname)) {
282 # we cannot migrate snapshots on local storage
283 # exceptions: 'zfspool' or 'qcow2' files (on directory storage)
284
285 my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
286
287 if (($scfg->{type} eq 'zfspool') || ($format eq 'qcow2')) {
288 $volhash->{$volid} = 1;
289 return;
290 }
291
292 die "can't migrate snapshot of local volume '$volid'\n";
293
294 } else {
295 $volhash->{$volid} = 1;
296 }
297 };
298
299 PVE::QemuServer::foreach_volid($conf, $test_volid);
300 foreach my $snapname (keys %{$conf->{snapshots}}) {
301 PVE::QemuServer::foreach_volid($conf->{snapshots}->{$snapname}, $test_volid, $snapname);
302 }
303
304 if ($self->{running} && !$sharedvm) {
305 die "can't do online migration - VM uses local disks\n";
306 }
307
308 # additional checks for local storage
309 foreach my $volid (keys %$volhash) {
310 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
311 my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
312
313 my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
314 ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');
315
316 die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
317 if !$migratable;
318
319 # image is a linked clone on local storage, so we can't migrate.
320 if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
321 die "can't migrate '$volid' as it's a clone of '$basename'";
322 }
323 }
324
325 foreach my $volid (keys %$volhash) {
326 my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
327 push @{$self->{volumes}}, $volid;
328 PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
329 }
330 };
331 die "Failed to sync data - $@" if $@;
332 }
333
334 sub phase1 {
335 my ($self, $vmid) = @_;
336
337 $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");
338
339 my $conf = $self->{vmconf};
340
341 # set migrate lock in config file
342 $conf->{lock} = 'migrate';
343 PVE::QemuConfig->write_config($vmid, $conf);
344
345 sync_disks($self, $vmid);
346
347 };
348
349 sub phase1_cleanup {
350 my ($self, $vmid, $err) = @_;
351
352 $self->log('info', "aborting phase 1 - cleanup resources");
353
354 my $conf = $self->{vmconf};
355 delete $conf->{lock};
356 eval { PVE::QemuConfig->write_config($vmid, $conf) };
357 if (my $err = $@) {
358 $self->log('err', $err);
359 }
360
361 if ($self->{volumes}) {
362 foreach my $volid (@{$self->{volumes}}) {
363 $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
364 # fixme: try to remove ?
365 }
366 }
367 }
368
369 sub phase2 {
370 my ($self, $vmid) = @_;
371
372 my $conf = $self->{vmconf};
373
374 $self->log('info', "starting VM $vmid on remote node '$self->{node}'");
375
376 my $raddr;
377 my $rport;
378 my $ruri; # the whole migration dst. URI (protocol:address[:port])
379 my $nodename = PVE::INotify::nodename();
380
381 ## start on remote node
382 my $cmd = [@{$self->{rem_ssh}}];
383
384 my $spice_ticket;
385 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
386 my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
387 $spice_ticket = $res->{ticket};
388 }
389
390 push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
391
392 # we use TCP only for insecure migrations, as TCP ssh forward tunnels often
393 # appeared too late (they are hard, if not impossible, to check for).
394 # secure migrations use UNIX sockets now; this *breaks* compatibility when trying
395 # to migrate from new to old, but *not* from old to new.
396 my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
397 my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;
398
399 if (!$secure_migration) {
400 push @$cmd, '--stateuri', 'tcp';
401 } else {
402 push @$cmd, '--stateuri', 'unix';
403 }
404
405 if ($self->{forcemachine}) {
406 push @$cmd, '--machine', $self->{forcemachine};
407 }
408
409 my $spice_port;
410
411 # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
412 # instead we pipe it through STDIN
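# parse the 'qm start' output to learn where the remote side listens for
# the incoming migration (tcp address:port or UNIX socket) and, if
# configured, on which port the SPICE server listens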
413 PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
414 my $line = shift;
415
416 if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
417 $raddr = $1;
418 $rport = int($2);
419 $ruri = "tcp:$raddr:$rport";
420 }
421 elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
422 $raddr = $1;
423 die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
424 $ruri = "unix:$raddr";
425 }
426 elsif ($line =~ m/^migration listens on port (\d+)$/) {
427 $raddr = "localhost";
428 $rport = int($1);
429 $ruri = "tcp:$raddr:$rport";
430 }
431 elsif ($line =~ m/^spice listens on port (\d+)$/) {
432 $spice_port = int($1);
433 }
434 }, errfunc => sub {
435 my $line = shift;
436 $self->log('info', $line);
437 });
438
439 die "unable to detect remote migration address\n" if !$raddr;
440
441 if ($secure_migration) {
442 $self->log('info', "start remote tunnel");
443
444 if ($ruri =~ /^unix:/) {
445 unlink $raddr;
446 $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
447 $self->{tunnel}->{sock_addr} = $raddr;
448
449 my $unix_socket_try = 0; # wait for the socket to become ready
450 while (! -S $raddr) {
451 $unix_socket_try++;
452 if ($unix_socket_try > 100) {
453 $self->{errors} = 1;
454 $self->finish_tunnel($self->{tunnel});
455 die "Timeout, migration socket $ruri did not get ready";
456 }
457
458 usleep(50000);
459 }
460
461 } elsif ($ruri =~ /^tcp:/) {
462 my $tunnel_addr;
463 if ($raddr eq "localhost") {
464 # for backwards compatibility with older qemu-server versions
465 my $pfamily = PVE::Tools::get_host_address_family($nodename);
466 my $lport = PVE::Tools::next_migrate_port($pfamily);
467 $tunnel_addr = "$lport:localhost:$rport";
468 }
469
470 $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
471
472 } else {
473 die "unsupported protocol in migration URI: $ruri\n";
474 }
475 }
476
477 my $start = time();
478 $self->log('info', "starting online/live migration on $ruri");
479 $self->{livemigration} = 1;
480
481 # load_defaults
482 my $defaults = PVE::QemuServer::load_defaults();
483
484 # always set migrate speed (overwrite kvm default of 32m)
485 # we set a very high default of 8192m, which is basically unlimited
486 my $migrate_speed = $defaults->{migrate_speed} || 8192;
487 $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
488 $migrate_speed = $migrate_speed * 1048576;
489 $self->log('info', "migrate_set_speed: $migrate_speed");
490 eval {
491 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
492 };
493 $self->log('info', "migrate_set_speed error: $@") if $@;
494
495 my $migrate_downtime = $defaults->{migrate_downtime};
496 $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
497 if (defined($migrate_downtime)) {
498 $self->log('info', "migrate_set_downtime: $migrate_downtime");
499 eval {
500 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
501 };
502 $self->log('info', "migrate_set_downtime error: $@") if $@;
503 }
504
505 $self->log('info', "set migration_caps");
506 eval {
507 PVE::QemuServer::set_migration_caps($vmid);
508 };
509 warn $@ if $@;
510
511 # set cachesize to 10% of the total memory
512 my $cachesize = int($conf->{memory}*1048576/10);
513 $self->log('info', "set cachesize: $cachesize");
514 eval {
515 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
516 };
517 $self->log('info', "migrate-set-cache-size error: $@") if $@;
518
519 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
520 my $rpcenv = PVE::RPCEnvironment::get();
521 my $authuser = $rpcenv->get_user();
522
523 my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
524
525 my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
526 my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
527
528 $self->log('info', "spice client_migrate_info");
529
530 eval {
531 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
532 hostname => $proxyticket, 'tls-port' => $spice_port,
533 'cert-subject' => $subject);
534 };
535 $self->log('info', "client_migrate_info error: $@") if $@;
536
537 }
538
539 $self->log('info', "start migrate command to $ruri");
540 eval {
541 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
542 };
543 my $merr = $@;
544 $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
545
546 my $lstat = 0;
547 my $usleep = 2000000;
548 my $i = 0;
549 my $err_count = 0;
550 my $lastrem = undef;
551 my $downtimecounter = 0;
552 while (1) {
553 $i++;
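# average RAM transferred per poll interval so far; used below to shorten
# the sleep once the remaining RAM drops under that average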
554 my $avglstat = $lstat ? $lstat / $i : 0;
555
556 usleep($usleep);
557 my $stat;
558 eval {
559 $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
560 };
561 if (my $err = $@) {
562 $err_count++;
563 warn "query migrate failed: $err\n";
564 $self->log('info', "query migrate failed: $err");
565 if ($err_count <= 5) {
566 usleep(1000000);
567 next;
568 }
569 die "too many query migrate failures - aborting\n";
570 }
571
572 if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
573 sleep(1);
574 next;
575 }
576
577 if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
578 $merr = undef;
579 $err_count = 0;
580 if ($stat->{status} eq 'completed') {
581 my $delay = time() - $start;
582 if ($delay > 0) {
583 my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
584 my $downtime = $stat->{downtime} || 0;
585 $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
586 }
587 }
588
589 if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
590 $self->log('info', "migration status error: $stat->{status}");
591 die "aborting\n"
592 }
593
594 if ($stat->{status} ne 'active') {
595 $self->log('info', "migration status: $stat->{status}");
596 last;
597 }
598
599 if ($stat->{ram}->{transferred} ne $lstat) {
600 my $trans = $stat->{ram}->{transferred} || 0;
601 my $rem = $stat->{ram}->{remaining} || 0;
602 my $total = $stat->{ram}->{total} || 0;
603 my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
604 my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
605 my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
606 my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
607 my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
608 # reduce sleep if remaining memory is lower than the average transfer per iteration
609 $usleep = 300000 if $avglstat && $rem < $avglstat;
610
611 $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
612 "remaining ${rem}), total ${total})");
613
614 if (${xbzrlecachesize}) {
615 $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
616 }
617
618 if (($lastrem && $rem > $lastrem ) || ($rem == 0)) {
619 $downtimecounter++;
620 }
621 $lastrem = $rem;
622
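# the counter above grows whenever the remaining RAM increased again or
# hit zero; after several such polls in a row, double the allowed
# downtime so the migration can converge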
623 if ($downtimecounter > 5) {
624 $downtimecounter = 0;
625 $migrate_downtime *= 2;
626 $self->log('info', "migrate_set_downtime: $migrate_downtime");
627 eval {
628 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
629 };
630 $self->log('info', "migrate_set_downtime error: $@") if $@;
631 }
632
633 }
634
635
636 $lstat = $stat->{ram}->{transferred};
637
638 } else {
639 die $merr if $merr;
640 die "unable to parse migration status '$stat->{status}' - aborting\n";
641 }
642 }
643
644 # just to be sure that the tunnel gets closed on successful migration, on error
645 # phase2_cleanup closes it *after* stopping the remote waiting VM
646 if (!$self->{errors} && $self->{tunnel}) {
647 eval { finish_tunnel($self, $self->{tunnel}); };
648 if (my $err = $@) {
649 $self->log('err', $err);
650 $self->{errors} = 1;
651 }
652 }
653 }
654
655 sub phase2_cleanup {
656 my ($self, $vmid, $err) = @_;
657
658 return if !$self->{errors};
659 $self->{phase2errors} = 1;
660
661 $self->log('info', "aborting phase 2 - cleanup resources");
662
663 $self->log('info', "migrate_cancel");
664 eval {
665 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
666 };
667 $self->log('info', "migrate_cancel error: $@") if $@;
668
669 my $conf = $self->{vmconf};
670 delete $conf->{lock};
671 eval { PVE::QemuConfig->write_config($vmid, $conf) };
672 if (my $err = $@) {
673 $self->log('err', $err);
674 }
675
676 # cleanup resources on target host
677 my $nodename = PVE::INotify::nodename();
678
679 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
680 eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
681 if (my $err = $@) {
682 $self->log('err', $err);
683 $self->{errors} = 1;
684 }
685
686 if ($self->{tunnel}) {
687 eval { finish_tunnel($self, $self->{tunnel}); };
688 if (my $err = $@) {
689 $self->log('err', $err);
690 $self->{errors} = 1;
691 }
692 }
693 }
694
695 sub phase3 {
696 my ($self, $vmid) = @_;
697
698 my $volids = $self->{volumes};
699 return if $self->{phase2errors};
700
701 # destroy local copies
702 foreach my $volid (@$volids) {
703 eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
704 if (my $err = $@) {
705 $self->log('err', "removing local copy of '$volid' failed - $err");
706 $self->{errors} = 1;
707 last if $err =~ /^interrupted by signal$/;
708 }
709 }
710 }
711
712 sub phase3_cleanup {
713 my ($self, $vmid, $err) = @_;
714
715 my $conf = $self->{vmconf};
716 return if $self->{phase2errors};
717
718 # move config to remote node
719 my $conffile = PVE::QemuConfig->config_file($vmid);
720 my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
721
722 die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
723 if !rename($conffile, $newconffile);
724
725 if ($self->{livemigration}) {
726 # now that the config file has been moved, we can resume the VM on the target for live migration
727 my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
728 eval{ PVE::Tools::run_command($cmd, outfunc => sub {},
729 errfunc => sub {
730 my $line = shift;
731 $self->log('err', $line);
732 });
733 };
734 if (my $err = $@) {
735 $self->log('err', $err);
736 $self->{errors} = 1;
737 }
738 }
739
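# give the SPICE client session up to ~10 seconds (50 * 200ms) to follow
# the VM to the target before the local instance is stopped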
740 eval {
741
742 my $timer = 0;
743 if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
744 $self->log('info', "Waiting for spice server migration");
745 while (1) {
746 my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
747 last if int($res->{'migrated'}) == 1;
748 last if $timer > 50;
749 $timer ++;
750 usleep(200000);
751 }
752 }
753 };
754
755 # always stop local VM
756 eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
757 if (my $err = $@) {
758 $self->log('err', "stopping vm failed - $err");
759 $self->{errors} = 1;
760 }
761
762 # always deactivate volumes - avoid LVM LVs being active on several nodes
763 eval {
764 my $vollist = PVE::QemuServer::get_vm_volumes($conf);
765 PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
766 };
767 if (my $err = $@) {
768 $self->log('err', $err);
769 $self->{errors} = 1;
770 }
771
772 # clear migrate lock
773 my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
774 $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
775 }
776
777 sub final_cleanup {
778 my ($self, $vmid) = @_;
779
780 # nothing to do
781 }
782
783 1;