package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;     # load_config/write_config/lock_config below
use PVE::AccessControl;  # spice ticket handling in phase2
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Replication;

use base qw(PVE::AbstractMigrate);

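# fork a command and attach a bidirectional pipe to it - used to run
# 'qm mtunnel' on the target node; returns { reader, writer, pid }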
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

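# close both pipe ends and reap the child process, waiting up to $timeout
# seconds before escalating to SIGTERM and finally SIGKILL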
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid}; # the key is 'pid', as set by fork_command_pipe
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

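# read a single line from the tunnel (default timeout: 60 seconds)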
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output if defined($output); # $output is undef on EOF

    return $output;
}

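# write a command to the tunnel; tunnel protocol version 1 and up replies
# with 'OK', so wait for that acknowledgement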
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}

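# spawn the ssh tunnel to the target node, optionally with a local port or
# socket forward (-L), and check the 'tunnel online'/'ver N' handshake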
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

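# ask the remote mtunnel to quit, tear down the ssh process and remove the
# forwarded unix socket (ssh leaves it behind) on both sides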
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

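# check preconditions before migration (config lock, local resources,
# storage availability on both nodes, working ssh connection);
# returns the PID of the running VM or 0 for offline migration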
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        # note: $self->{running} is only set after prepare() returns, so test
        # the local $running value here
        if ($running || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

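# scan all storages and the VM config for local volumes, check which ones
# can be migrated, run pending replication jobs, and copy offline volumes
# to the target node via storage_migrate()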
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $res = [];

    eval {

        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            die "local cdrom image\n" if $attr->{cdrom};

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
            $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
            if (defined($snaprefs)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use the same sid on the remote side
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate it
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        my $rep_volumes;

        $self->log('info', "copying disk images");

        my $rep_cfg = PVE::ReplicationConfig->new();

        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            die "can't live migrate VM with replicated volumes\n" if $self->{running};
            my $start_time = time();
            my $logfunc = sub { my ($msg) = @_; $self->log('info', $msg); };
            $rep_volumes = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
            $self->{replicated_volumes} = $rep_volumes;
        }

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid}->{ref} eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                next if $rep_volumes->{$volid};
                push @{$self->{volumes}}, $volid;
                my $insecure = $self->{opts}->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
                    undef, undef, undef, undef, $insecure, $with_snapshots);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

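# free disks already allocated on the target node ('pvesm free' via ssh),
# used when aborting a storage migration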
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {

        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

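# phase 1: set the 'migrate' lock in the config file and sync local disks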
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

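# phase 2: start the VM on the target node, set up the migration tunnel
# and NBD drive mirroring if requested, then start and monitor the actual
# live migration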
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass via command line parameter),
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

602 die "unable to detect remote migration address\n" if !$raddr;
603
604 if ($migration_type eq 'secure') {
605 $self->log('info', "start remote tunnel");
606
607 if ($ruri =~ /^unix:/) {
608 unlink $raddr;
609 $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
610 $self->{tunnel}->{sock_addr} = $raddr;
611
612 my $unix_socket_try = 0; # wait for the socket to become ready
613 while (! -S $raddr) {
614 $unix_socket_try++;
615 if ($unix_socket_try > 100) {
616 $self->{errors} = 1;
617 $self->finish_tunnel($self->{tunnel});
618 die "Timeout, migration socket $ruri did not get ready";
619 }
620
621 usleep(50000);
622 }
623
624 } elsif ($ruri =~ /^tcp:/) {
625 my $tunnel_addr;
626 if ($raddr eq "localhost") {
627 # for backwards compatibility with older qemu-server versions
628 my $pfamily = PVE::Tools::get_host_address_family($nodename);
629 my $lport = PVE::Tools::next_migrate_port($pfamily);
630 $tunnel_addr = "$lport:localhost:$rport";
631 }
632
633 $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
634
635 } else {
636 die "unsupported protocol in migration URI: $ruri\n";
637 }
638 }
639
640 my $start = time();
641
642 if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
643 $self->{storage_migration} = 1;
644 $self->{storage_migration_jobs} = {};
645 $self->log('info', "starting storage migration");
646
647 die "The number of local disks does not match between the source and the destination.\n"
648 if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
649 foreach my $drive (keys %{$self->{target_drive}}){
650 my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
651 $self->log('info', "$drive: start migration to to $nbd_uri");
652 PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
653 }
654 }
655
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        # note: 'my VAR = ... if COND' has undefined behaviour, so compute unconditionally
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $memory / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer speed
                $usleep = 100000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

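# phase 2 error cleanup: cancel migration and block jobs, remove disks
# already allocated on the target, and stop the remote VM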
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {

        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

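# phase 3: remove local copies of successfully migrated offline volumes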
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

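# phase 3 cleanup: commit mirrored drives to the config, move the config
# file to the target node, resume the remote VM and stop/clean up the
# local one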
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block-job
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # transfer replication state before moving config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
            eval {
                $self->write_tunnel($tunnel, 30, "resume $vmid");
            };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        } else {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
            my $logf = sub {
                my $line = shift;
                $self->log('err', $line);
            };
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it already
    if ($tunnel) {
        eval { finish_tunnel($self, $tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;