package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw(WNOHANG);
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::AccessControl;
use Time::HiRes qw(usleep);
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

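# Fork a helper command with both stdin and stdout connected via IPC::Open2.
# Returns { writer => ..., reader => ..., pid => ... }. A minimal usage
# sketch (the command shown is illustrative only):
#
#   my $pipe = $self->fork_command_pipe([@{$self->{rem_ssh}}, 'qm', 'mtunnel']);
#   my $reply = $pipe->{reader}->getline();
#   $self->finish_command_pipe($pipe, 30);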
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

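# Close both pipe ends and reap the child: wait up to $timeout seconds for a
# normal exit, then escalate to SIGTERM and finally SIGKILL.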
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

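# Start 'qm mtunnel' on the target node over ssh, optionally with a local
# port/socket forward (-L $tunnel_addr), and wait up to 60 seconds for its
# "tunnel online" greeting before returning the pipe handle.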
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

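# Send 'quit' to the mtunnel process, tear down the ssh child and remove a
# leftover forwarded unix socket on both the local and the remote node.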
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the forwarded socket on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

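# Called by PVE::AbstractMigrate before phase1: load the VM config, check
# the lock, verify that all referenced storages are available on source and
# target node, activate local volumes and test the ssh connection.
# Returns the PID of the running VM, or 0 if it is stopped.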
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [@{$self->{rem_ssh}}, '/bin/true'];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

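# Copy all local (non-shared) volumes to the target node. Each volume is
# classified by how it was found: 'storage' (scan of local storages),
# 'config' (referenced by the current VM config) or 'snapshot' (only
# referenced by snapshots). Offline copies go through
# PVE::Storage::storage_migrate; disks of a running VM with a target
# storage are deferred to the NBD mirror started in phase2.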
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $res = [];

    eval {
        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                $local_volumes->{$volid} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            die "local cdrom image\n" if $attr->{cdrom};

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snaprefs)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            if ($local_volumes->{$vol} eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($local_volumes->{$vol} eq 'config') {
                die "can't live migrate attached local disks without with-local-disks option\n"
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($local_volumes->{$vol} eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use same sid for remote local
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        $self->log('info', "copying disk images");

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                push @{$self->{volumes}}, $volid;
                my $insecure = $self->{opts}->{migration_type} eq 'insecure';
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid, undef, undef, undef, undef, $insecure);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

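# Remove disks that were already allocated on the target node ('pvesm free'
# over ssh) after a failed storage migration.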
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

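# The migration itself runs in phases driven by PVE::AbstractMigrate:
# phase1 locks the config and copies offline disks, phase2 performs the
# live migration, phase3/phase3_cleanup finalize on success. Each phase has
# a matching *_cleanup handler that runs on error.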
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

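# Error handler for phase1: drop the 'migrate' lock again and report volume
# copies that may have been left behind on the target node.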
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

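# Start the VM on the target ('qm start --migratedfrom ...'), parse its
# output for the migration, NBD and spice listen addresses, open the ssh
# tunnel for secure migrations, mirror local disks over NBD and then issue
# the QEMU 'migrate' command, polling 'query-migrate' until it completes.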
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    if ($migration_type eq 'secure') {
        $self->log('info', "start remote tunnel");

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready";
                }

                usleep(50000);
            }
        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    }

    my $start = time();

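    # mirror the local disks to the NBD exports announced by the target;
    # the block jobs are completed later, in phase3_cleanup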
    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
        }
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

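    # poll 'query-migrate' until QEMU reports completed/failed/cancelled;
    # when the remaining RAM repeatedly grows instead of shrinking, double
    # the allowed downtime so the migration can converge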
    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer rate
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # just to be sure that the tunnel gets closed on successful migration, on error
    # phase2_cleanup closes it *after* stopping the remote waiting VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

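# Error handler for phase2: cancel the migration and any block jobs, drop
# the config lock, stop the half-started VM on the target and close the
# tunnel.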
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

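# On success, remove the local copies of offline-migrated volumes.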
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

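# Finalize: complete the drive-mirror jobs and update the disk entries in
# the config, move the config file to the target node, resume the VM there,
# wait for the spice server hand-over, stop the local VM, deactivate its
# volumes and clear the migrate lock on the target.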
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    if ($self->{storage_migration}) {
        # finish block-job
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # remove drives referencing the nbd server from source
            # otherwise vm_stop might hang later on
            foreach my $drive (keys %{$self->{target_drive}}) {
                PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "device_del", id => $drive);
            }
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }
        # config moved and nbd server stopped - now we can resume vm on target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'unlock', $vmid];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

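# Last hook called by PVE::AbstractMigrate after all phases have run.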
sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;