# PVE/QemuMigrate.pm (git.proxmox.com, qemu-server.git)
package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use Time::HiRes qw( usleep );

use PVE::AbstractMigrate;
use PVE::AccessControl;
use PVE::Cluster;
use PVE::INotify;
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools;

use base qw(PVE::AbstractMigrate);
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors: open2() may die after forking, in which case we are
    # running inside the failed child and must not return to the caller
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        }
        return 0;
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}
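# Illustrative usage (not part of the original module): a minimal sketch of how
# a caller could drive fork_command_pipe()/finish_command_pipe(); the command
# and the 10 second grace period are made-up example values.
#
#   my $pipe = $self->fork_command_pipe(['ssh', 'target', 'cat']);
#   print { $pipe->{writer} } "ping\n";
#   my $reply = readline($pipe->{reader});
#   $self->finish_command_pipe($pipe, 10); # wait up to 10s before SIGTERM/SIGKILL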
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }

    return $tunnel;
}
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the UNIX socket on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side too
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
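# Illustrative usage (not part of the original module): a minimal sketch,
# assuming lock_config() invokes the code ref with the extra parameters while
# holding the VM config lock:
#
#   $self->lock_vm($vmid, sub {
#       my ($vmid) = @_;
#       # ... work that requires the config lock ...
#   }, $vmid);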
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    eval {
        # found local volumes and their origin
        my $volhash = {};
        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $volhash->{$volid} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $is_cdrom, $snapname) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $volhash->{$volid} = defined($snapname) ? 'snapshot' : 'config';

            die "can't migrate local cdrom '$volid'\n" if $is_cdrom;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snapname)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)
                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);

                return if ($scfg->{type} eq 'zfspool') || ($format eq 'qcow2');

                die "can't migrate snapshot of local volume '$volid'\n";
            }
        };

        my $test_drive = sub {
            my ($ds, $drive, $snapname) = @_;

            &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
        };

        PVE::QemuServer::foreach_drive($conf, $test_drive);
        foreach my $snapname (keys %{$conf->{snapshots}}) {
            &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
                if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
            PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
        }

        foreach my $vol (sort keys %$volhash) {
            if ($volhash->{$vol} eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($volhash->{$vol} eq 'config') {
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($volhash->{$vol} eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    # We use TCP only for insecure migrations, as TCP ssh forward tunnels often
    # appeared too late (they are hard, if not impossible, to check for).
    # Secure migration uses UNIX sockets now; this *breaks* compatibility when
    # migrating from new to old, but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
    my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;

    if (!$secure_migration) {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }
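    # Example (an assumption, not from this file): the flag checked above would
    # be set cluster-wide in /etc/pve/datacenter.cfg, e.g.:
    #
    #   migration_unsecure: 1
    #
    # which selects the plain TCP state URI instead of the SSH-forwarded UNIX socket.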
    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it via command
    # line parameter); instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });
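    # For orientation (not part of the original module): sample lines the
    # outfunc regexes above are written to match (addresses and ports are
    # made-up example values):
    #
    #   migration listens on tcp:localhost:60000
    #   migration listens on unix:/run/qemu-server/100.migrate
    #   migration listens on port 60000
    #   spice listens on port 61000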
459 die "unable to detect remote migration address\n" if !$raddr;
461 if ($secure_migration) {
462 $self->log('info', "start remote tunnel");
464 if ($ruri =~ /^unix:/) {
466 $self->{tunnel
} = $self->fork_tunnel("$raddr:$raddr");
467 $self->{tunnel
}->{sock_addr
} = $raddr;
469 my $unix_socket_try = 0; # wait for the socket to become ready
470 while (! -S
$raddr) {
472 if ($unix_socket_try > 100) {
474 $self->finish_tunnel($self->{tunnel
});
475 die "Timeout, migration socket $ruri did not get ready";
481 } elsif ($ruri =~ /^tcp:/) {
483 if ($raddr eq "localhost") {
484 # for backwards compatibility with older qemu-server versions
485 my $pfamily = PVE
::Tools
::get_host_address_family
($nodename);
486 my $lport = PVE
::Tools
::next_migrate_port
($pfamily);
487 $tunnel_addr = "$lport:localhost:$rport";
490 $self->{tunnel
} = $self->fork_tunnel($tunnel_addr);
493 die "unsupported protocol in migration URI: $ruri\n";
498 $self->log('info', "starting online/live migration on $ruri");
499 $self->{livemigration
} = 1;
502 my $defaults = PVE
::QemuServer
::load_defaults
();
504 # always set migrate speed (overwrite kvm default of 32m)
505 # we set a very hight default of 8192m which is basically unlimited
506 my $migrate_speed = $defaults->{migrate_speed
} || 8192;
507 $migrate_speed = $conf->{migrate_speed
} || $migrate_speed;
508 $migrate_speed = $migrate_speed * 1048576;
509 $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;
    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }
525 $self->log('info', "set migration_caps");
527 PVE
::QemuServer
::set_migration_caps
($vmid);
    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory} * 1048576 / 10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                                                hostname => $proxyticket, 'tls-port' => $spice_port,
                                                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }
559 $self->log('info', "start migrate command to $ruri");
561 PVE
::QemuServer
::vm_mon_cmd_nocheck
($vmid, "migrate", uri
=> $ruri);
564 $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;

    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }
        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory} / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }
            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;

                # reduce sleep if remaining memory is lower than the average transfer rate
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }
                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }
            $lstat = $stat->{ram}->{transferred};

        } else {
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
    # Just to be sure that the tunnel gets closed on successful migration; on
    # error, phase2_cleanup closes it *after* stopping the remote waiting VM.
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;
    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }
    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }
    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file is moved, we can resume the VM on the target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                                    errfunc => sub {
                                        my $line = shift;
                                        $self->log('err', $line);
                                    });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            my $timer = 0;
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;