package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Replication;

use base qw(PVE::AbstractMigrate);

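# fork_command_pipe: spawn an external command (typically ssh) with
# bidirectional pipes attached via IPC::Open2. open2() forks internally;
# if the exec fails we end up in the forked child, which is detected by
# comparing $$ against the pre-fork PID and aborted with POSIX::_exit so
# the caller's cleanup code never runs twice.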
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
	$self->log('err', "can't fork command pipe\n");
	POSIX::_exit(1);
	kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

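# finish_command_pipe: close both pipe ends and reap the child. Waits up
# to $timeout seconds for a normal exit, then escalates to SIGTERM and
# finally SIGKILL before giving up.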
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
	my $res = waitpid($cpid, WNOHANG);
	if (defined($res) && ($res == $cpid)) {
	    delete $cmdpipe->{pid}; # key set in fork_command_pipe
	    return 1;
	} else {
	    return 0;
	}
    };

    if ($timeout) {
	for (my $i = 0; $i < $timeout; $i++) {
	    return if &$collect_child_process();
	    sleep(1);
	}
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
	return if &$collect_child_process();
	sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
	if !&$collect_child_process();
}

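# read a single line from the tunnel, guarded by run_with_timeout so a
# stuck ssh process cannot block the migration task forever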
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
	PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output if defined($output); # avoid warning on EOF

    return $output;
}

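# write one command line to the tunnel; tunnel protocol version 1 and
# later acknowledges every command with 'OK', anything else is an error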
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
	PVE::Tools::run_with_timeout($timeout, sub {
	    print $writer "$command\n";
	    $writer->flush();
	});
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
	my $res = eval { $self->read_tunnel($tunnel, 10); };
	die "no reply to command '$command': $@\n" if $@;

	if ($res eq 'OK') {
	    return;
	} else {
	    die "tunnel replied '$res' to command '$command'\n";
	}
    }
}

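# fork_tunnel: open the control connection to the target node by running
# 'qm mtunnel' over ssh (optionally with a -L forward for unix socket
# migration). The remote side is expected to greet with 'tunnel online'
# followed by a 'ver <n>' protocol version line.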
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
	my $helo = $self->read_tunnel($tunnel, 60);
	die "no reply\n" if !$helo;
	die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
	die "got strange reply from mtunnel ('$helo')\n"
	    if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
	my $ver = $self->read_tunnel($tunnel, 10);
	if ($ver =~ /^ver (\d+)$/) {
	    $tunnel->{version} = $1;
	    $self->log('info', "ssh tunnel $ver\n");
	} else {
	    $err = "received invalid tunnel version string '$ver'\n" if !$err;
	}
    };

    if ($err) {
	$self->finish_command_pipe($tunnel);
	die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

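# finish_tunnel: ask the remote mtunnel to quit, tear down the ssh child
# and remove any leftover unix socket on both sides (ssh does not clean
# up forwarded sockets itself)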
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
	# ssh does not clean up on the local host
	my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
	PVE::Tools::run_command($cmd);

	# ... and just to be sure, check on the remote side
	unshift @{$cmd}, @{$self->{rem_ssh}};
	PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

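# prepare: called from PVE::AbstractMigrate before the migration phases.
# Checks the config lock, local resources and storage availability on
# both nodes, activates local volumes and verifies the ssh connection.
# Returns the VM's PID if it is running (enables online migration), 0
# otherwise.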
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
	die "can't migrate running VM without --online\n" if !$online;
	$running = $pid;

	$self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
	if ($running || !$self->{opts}->{force}) {
	    die "can't migrate VM which uses local devices\n";
	} else {
	    $self->log('info', "migrating VM which uses local devices");
	}
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
	my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

	# check if storage is available on both nodes
	my $targetsid = $self->{opts}->{targetstorage} // $sid;

	my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
	PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

	if ($scfg->{shared}) {
	    # PVE::Storage::activate_storage checks this for non-shared storages
	    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
	    warn "Used shared storage '$sid' is not online on source node!\n"
		if !$plugin->check_connection($sid, $scfg);
	} else {
	    # only activate if not shared
	    push @$need_activate, $volid;
	}
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

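# sync_disks: collect all local (non-shared) volumes, classify how they
# are referenced (storage scan, current config, snapshots), run pending
# replication jobs, and copy offline volumes to the target storage via
# PVE::Storage::storage_migrate. Volumes referenced in the config of a
# running VM are deferred to the NBD/drive-mirror path in phase2.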
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $override_targetsid = $self->{opts}->{targetstorage};

    eval {

	# local volumes found and their origin
	my $local_volumes = {};
	my $local_volumes_errors = {};
	my $other_errors = [];
	my $abort = 0;

	my $sharedvm = 1;

	my $log_error = sub {
	    my ($msg, $volid) = @_;

	    if (defined($volid)) {
		$local_volumes_errors->{$volid} = $msg;
	    } else {
		push @$other_errors, $msg;
	    }
	    $abort = 1;
	};

	my @sids = PVE::Storage::storage_ids($self->{storecfg});
	foreach my $storeid (@sids) {
	    my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
	    next if $scfg->{shared};
	    next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

	    # get list from PVE::Storage (for unused volumes)
	    my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

	    next if @{$dl->{$storeid}} == 0;

	    my $targetsid = $override_targetsid // $storeid;

	    # check if storage is available on target node
	    PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
	    $sharedvm = 0; # there is a non-shared disk

	    PVE::Storage::foreach_volid($dl, sub {
		my ($volid, $sid, $volname) = @_;

		$local_volumes->{$volid}->{ref} = 'storage';
	    });
	}

	my $test_volid = sub {
	    my ($volid, $attr) = @_;

	    if ($volid =~ m|^/|) {
		return if $attr->{shared};
		$local_volumes->{$volid}->{ref} = 'config';
		die "local file/device\n";
	    }

	    my $snaprefs = $attr->{referenced_in_snapshot};

	    if ($attr->{cdrom}) {
		if ($volid eq 'cdrom') {
		    my $msg = "can't migrate local cdrom drive";
		    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
			my $snapnames = join(', ', sort keys %$snaprefs);
			$msg .= " (referenced in snapshot - $snapnames)";
		    }
		    &$log_error("$msg\n");
		    return;
		}
		return if $volid eq 'none';
	    }

	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

	    my $targetsid = $override_targetsid // $sid;
	    # check if storage is available on both nodes
	    my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
	    PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

	    return if $scfg->{shared};

	    $sharedvm = 0;

	    $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

	    die "local cdrom image\n" if $attr->{cdrom};

	    my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

	    die "owned by other VM (owner = VM $owner)\n"
		if !$owner || ($owner != $self->{vmid});

	    my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
	    $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
	    if (defined($snaprefs)) {
		# we cannot migrate snapshots on local storage
		# exceptions: 'zfspool' or 'qcow2' files (on directory storage)

		die "online storage migration not possible if snapshot exists\n" if $self->{running};
		if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
		    die "non-migratable snapshot exists\n";
		}
	    }

	    die "referenced by linked clone(s)\n"
		if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
	};

	PVE::QemuServer::foreach_volid($conf, sub {
	    my ($volid, $attr) = @_;
	    eval { $test_volid->($volid, $attr); };
	    if (my $err = $@) {
		&$log_error($err, $volid);
	    }
	});

	foreach my $vol (sort keys %$local_volumes) {
	    my $ref = $local_volumes->{$vol}->{ref};
	    if ($ref eq 'storage') {
		$self->log('info', "found local disk '$vol' (via storage)\n");
	    } elsif ($ref eq 'config') {
		&$log_error("can't live migrate attached local disks without the 'with-local-disks' option\n", $vol)
		    if $self->{running} && !$self->{opts}->{"with-local-disks"};
		$self->log('info', "found local disk '$vol' (in current VM config)\n");
	    } elsif ($ref eq 'snapshot') {
		$self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
	    } else {
		$self->log('info', "found local disk '$vol'\n");
	    }
	}

	foreach my $vol (sort keys %$local_volumes_errors) {
	    $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
	}
	foreach my $err (@$other_errors) {
	    $self->log('warn', "$err");
	}

	if ($abort) {
	    die "can't migrate VM - check log\n";
	}

	# additional checks for local storage
	foreach my $volid (keys %$local_volumes) {
	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
	    my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

	    my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
		($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

	    die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
		if !$migratable;

	    # image is a linked clone on local storage, so we can't migrate
	    if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
		die "can't migrate '$volid' as it's a clone of '$basename'";
	    }
	}

	my $rep_volumes;

	$self->log('info', "copying disk images");

	my $rep_cfg = PVE::ReplicationConfig->new();

	if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
	    die "can't live migrate VM with replicated volumes\n" if $self->{running};
	    my $start_time = time();
	    my $logfunc = sub { my ($msg) = @_; $self->log('info', $msg); };
	    $rep_volumes = PVE::Replication::run_replication(
		'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
	    $self->{replicated_volumes} = $rep_volumes;
	}

	foreach my $volid (keys %$local_volumes) {
	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
	    my $targetsid = $override_targetsid // $sid;
	    if ($self->{running} && $local_volumes->{$volid}->{ref} eq 'config') {
		push @{$self->{online_local_volumes}}, $volid;
	    } else {
		next if $rep_volumes->{$volid};
		push @{$self->{volumes}}, $volid;
		my $insecure = $self->{opts}->{migration_type} eq 'insecure';
		my $with_snapshots = $local_volumes->{$volid}->{snapshots};
		PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
		    undef, undef, undef, undef, $insecure, $with_snapshots);
	    }
	}
    };
    die "Failed to sync data - $@" if $@;
}

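# cleanup_remotedisks: remove disks already allocated on the target node
# ('pvesm free' via ssh) after a failed storage migration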
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {

	my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
	my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

	my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

	eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
	if (my $err = $@) {
	    $self->log('err', $err);
	    $self->{errors} = 1;
	}
    }
}

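# phase1: set the 'migrate' lock in the VM config and copy offline disks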
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
	$self->log('err', $err);
    }

    if ($self->{volumes}) {
	foreach my $volid (@{$self->{volumes}}) {
	    $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
	    # fixme: try to remove ?
	}
    }
}

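# phase2: start the VM on the target node in incoming-migration mode,
# parse the addresses it listens on, set up the ssh tunnel, mirror any
# local disks over NBD and drive the RAM migration to completion while
# polling 'query-migrate'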
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
	my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
	$spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
	if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
	push @$cmd, '--stateuri', 'tcp';
    } else {
	push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
	push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{online_local_volumes}) {
	push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
	my $line = shift;

	if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
	    $raddr = $1;
	    $rport = int($2);
	    $ruri = "tcp:$raddr:$rport";
	}
	elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
	    $raddr = $1;
	    die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
	    $ruri = "unix:$raddr";
	}
	elsif ($line =~ m/^migration listens on port (\d+)$/) {
	    $raddr = "localhost";
	    $rport = int($1);
	    $ruri = "tcp:$raddr:$rport";
	}
	elsif ($line =~ m/^spice listens on port (\d+)$/) {
	    $spice_port = int($1);
	}
	elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
	    my $volid = $4;
	    my $nbd_uri = "nbd:$1:$2:exportname=$3";
	    my $targetdrive = $3;
	    $targetdrive =~ s/drive-//g;

	    $self->{target_drive}->{$targetdrive}->{volid} = $volid;
	    $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
	}
    }, errfunc => sub {
	my $line = shift;
	$self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    $self->log('info', "start remote tunnel");

    if ($migration_type eq 'secure') {

	if ($ruri =~ /^unix:/) {
	    unlink $raddr;
	    $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
	    $self->{tunnel}->{sock_addr} = $raddr;

	    my $unix_socket_try = 0; # wait for the socket to become ready
	    while (! -S $raddr) {
		$unix_socket_try++;
		if ($unix_socket_try > 100) {
		    $self->{errors} = 1;
		    $self->finish_tunnel($self->{tunnel});
		    die "Timeout, migration socket $ruri did not get ready";
		}

		usleep(50000);
	    }

	} elsif ($ruri =~ /^tcp:/) {
	    my $tunnel_addr;
	    if ($raddr eq "localhost") {
		# for backwards compatibility with older qemu-server versions
		my $pfamily = PVE::Tools::get_host_address_family($nodename);
		my $lport = PVE::Tools::next_migrate_port($pfamily);
		$tunnel_addr = "$lport:localhost:$rport";
	    }

	    $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

	} else {
	    die "unsupported protocol in migration URI: $ruri\n";
	}
    } else {
	# fork a tunnel for insecure migration too, to send fast commands like resume
	$self->{tunnel} = $self->fork_tunnel();
    }

    my $start = time();

    if (defined($self->{online_local_volumes})) {
	$self->{storage_migration} = 1;
	$self->{storage_migration_jobs} = {};
	$self->log('info', "starting storage migration");

	die "The number of local disks does not match between the source and the destination.\n"
	    if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
	foreach my $drive (keys %{$self->{target_drive}}) {
	    my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
	    $self->log('info', "$drive: start migration to $nbd_uri");
	    PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
	}
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
	PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
	$self->log('info', "migrate_set_downtime: $migrate_downtime");
	eval {
	    PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
	};
	$self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
	PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "set cachesize: $cachesize");
    eval {
	PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

	my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
	my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

	$self->log('info', "spice client_migrate_info");

	eval {
	    PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
		hostname => $proxyticket, 'tls-port' => $spice_port,
		'cert-subject' => $subject);
	};
	$self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
	PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
	$i++;
	my $avglstat = $lstat ? $lstat / $i : undef;

	usleep($usleep);
	my $stat;
	eval {
	    $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
	};
	if (my $err = $@) {
	    $err_count++;
	    warn "query migrate failed: $err\n";
	    $self->log('info', "query migrate failed: $err");
	    if ($err_count <= 5) {
		usleep(1000000);
		next;
	    }
	    die "too many query migrate failures - aborting\n";
	}

	if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
	    sleep(1);
	    next;
	}

	if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
	    $merr = undef;
	    $err_count = 0;
	    if ($stat->{status} eq 'completed') {
		my $delay = time() - $start;
		if ($delay > 0) {
		    my $mbps = sprintf "%.2f", $memory / $delay;
		    my $downtime = $stat->{downtime} || 0;
		    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
		}
	    }

	    if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
		$self->log('info', "migration status error: $stat->{status}");
		die "aborting\n";
	    }

	    if ($stat->{status} ne 'active') {
		$self->log('info', "migration status: $stat->{status}");
		last;
	    }

	    if ($stat->{ram}->{transferred} ne $lstat) {
		my $trans = $stat->{ram}->{transferred} || 0;
		my $rem = $stat->{ram}->{remaining} || 0;
		my $total = $stat->{ram}->{total} || 0;
		my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
		my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
		my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
		my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
		my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
		# reduce sleep if remaining memory is lower than the average transfer speed
		$usleep = 100000 if $avglstat && $rem < $avglstat;

		$self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
		    "remaining ${rem}, total ${total})");

		if ($xbzrlecachesize) {
		    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
		}

		if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
		    $downtimecounter++;
		}
		$lastrem = $rem;

		if ($downtimecounter > 5) {
		    $downtimecounter = 0;
		    $migrate_downtime *= 2;
		    $self->log('info', "migrate_set_downtime: $migrate_downtime");
		    eval {
			PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
		    };
		    $self->log('info', "migrate_set_downtime error: $@") if $@;
		}
	    }

	    $lstat = $stat->{ram}->{transferred};

	} else {
	    die $merr if $merr;
	    die "unable to parse migration status '$stat->{status}' - aborting\n";
	}
    }
}

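# phase2_cleanup: on error, cancel the migration and any block jobs,
# remove disks already copied to the target and stop the remote VM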
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
	PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
	$self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {

	eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
	if (my $err = $@) {
	    $self->log('err', $err);
	}

	eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
	if (my $err = $@) {
	    $self->log('err', $err);
	}
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
	$self->log('err', $err);
	$self->{errors} = 1;
    }

    if ($self->{tunnel}) {
	eval { finish_tunnel($self, $self->{tunnel}); };
	if (my $err = $@) {
	    $self->log('err', $err);
	    $self->{errors} = 1;
	}
    }
}

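# phase3: migration succeeded - remove the now obsolete local disk copies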
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
	eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
	if (my $err = $@) {
	    $self->log('err', "removing local copy of '$volid' failed - $err");
	    $self->{errors} = 1;
	    last if $err =~ /^interrupted by signal$/;
	}
    }
}

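# phase3_cleanup: finalize a successful migration - commit mirrored
# drives to the config, move the config file to the target node, resume
# the VM there, then stop the local VM and deactivate/free local volumes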
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
	# finish block-job
	eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

	if (my $err = $@) {
	    eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
	    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
	    die "Failed to complete storage migration\n";
	} else {
	    foreach my $target_drive (keys %{$self->{target_drive}}) {
		my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
		$conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
		PVE::QemuConfig->write_config($vmid, $conf);
	    }
	}
    }

    # transfer replication state before moving the config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
	if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
	if ($self->{storage_migration}) {
	    # stop nbd server on remote vm - requirement for resume since 2.9
	    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

	    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
	    if (my $err = $@) {
		$self->log('err', $err);
		$self->{errors} = 1;
	    }
	}

	# config moved and nbd server stopped - now we can resume vm on target
	if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
	    eval {
		$self->write_tunnel($tunnel, 30, "resume $vmid");
	    };
	    if (my $err = $@) {
		$self->log('err', $err);
		$self->{errors} = 1;
	    }
	} else {
	    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
	    my $logf = sub {
		my $line = shift;
		$self->log('err', $line);
	    };
	    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
	    if (my $err = $@) {
		$self->log('err', $err);
		$self->{errors} = 1;
	    }
	}

	if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
	    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
	    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
	}
    }

    # close tunnel on successful migration, on error phase2_cleanup already closed it
    if ($tunnel) {
	eval { finish_tunnel($self, $tunnel); };
	if (my $err = $@) {
	    $self->log('err', $err);
	    $self->{errors} = 1;
	}
    }

    eval {
	my $timer = 0;
	if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
	    $self->log('info', "Waiting for spice server migration");
	    while (1) {
		my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
		last if int($res->{'migrated'}) == 1;
		last if $timer > 50;
		$timer++;
		usleep(200000);
	    }
	}
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
	$self->log('err', "stopping vm failed - $err");
	$self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs being active on several nodes
    eval {
	my $vollist = PVE::QemuServer::get_vm_volumes($conf);
	PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
	$self->log('err', $err);
	$self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
	# destroy local copies
	my $volids = $self->{online_local_volumes};

	foreach my $volid (@$volids) {
	    eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
	    if (my $err = $@) {
		$self->log('err', "removing local copy of '$volid' failed - $err");
		$self->{errors} = 1;
		last if $err =~ /^interrupted by signal$/;
	    }
	}
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

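# round up to the next power of two; exact powers map to themselves, e.g.
# round_powerof2(3) == 4, round_powerof2(1024) == 1024. Used above to
# align the migration cache size (10% of the VM memory), so a 1024 MiB
# guest gets int(1024*1048576/10) = 107374182, rounded to 134217728.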
sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0]-1)/log(2));
}

1;