package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw(WNOHANG);
use PVE::AccessControl;
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;
use Time::HiRes qw(usleep);
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

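# Fork a helper command (used for the 'qm mtunnel' ssh tunnel below) with
# both its stdin and stdout connected to us via a pipe pair.
# Returns a hash: { writer => $fh, reader => $fh, pid => $child_pid }.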
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        # we are the forked child whose exec failed - make sure it never
        # returns to the caller's code
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

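# Close both ends of a command pipe and reap the child: wait up to
# $timeout seconds, then send SIGTERM, wait again, and finally SIGKILL.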
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

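# Spawn the ssh migration tunnel to the remote node ('qm mtunnel'),
# optionally with a local port/socket forward (-L), and wait for its
# greeting line:
#
#   tunnel online    -> ready, proceed
#   no quorum        -> target node has no quorum, abort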
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

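# Ask the tunnel to quit, collect the child, and remove a leftover
# UNIX socket file on both the local and the remote side.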
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the forwarded socket file on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, also remove it on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

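# Pre-flight checks: config lock, local resources, storage availability on
# both nodes, volume activation and ssh connectivity.
# Returns the VM's PID if it is running, 0 otherwise.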
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

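# Collect all local (non-shared) volumes referenced by the VM config, its
# snapshots, or found on enabled local storages; check that each one is
# migratable, then copy the offline ones to the target via storage_migrate.
# Attached disks of a running VM that will be mirrored online via NBD are
# recorded in $self->{online_local_volumes} instead.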
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $res = [];

    eval {
        # find local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $is_cdrom, $snapname) = @_;

            return if !$volid;

            if ($volid =~ m|^/|) {
                $local_volumes->{$volid} = 'config';
                die "local file/device\n";
            }

            if ($is_cdrom) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    $msg .= " (referenced in snapshot '$snapname')"
                        if defined($snapname);

                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid} = defined($snapname) ? 'snapshot' : 'config';

            die "local cdrom image\n" if $is_cdrom;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snapname)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        my $test_drive = sub {
            my ($ds, $drive, $snapname) = @_;

            eval {
                &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
            };

            &$log_error($@, $drive->{file}) if $@;
        };

        foreach my $snapname (keys %{$conf->{snapshots}}) {
            eval {
                &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
                    if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
            };
            &$log_error($@, $conf->{snapshots}->{$snapname}->{'vmstate'}) if $@;

            PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
        }
        PVE::QemuServer::foreach_drive($conf, $test_drive);

        foreach my $vol (sort keys %$local_volumes) {
            if ($local_volumes->{$vol} eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($local_volumes->{$vol} eq 'config') {
                die "can't live migrate attached local disks without with-local-disks option\n"
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($local_volumes->{$vol} eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use same sid for remote local
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        $self->log('info', "copying disk images");

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
                # attached disk of a running VM - will be mirrored online via NBD
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                push @{$self->{volumes}}, $volid;
                # use the plain (unencrypted) transport for insecure migrations
                my $insecure = $self->{opts}->{migration_type} eq 'insecure';
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid, undef, undef, undef, undef, $insecure);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

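# Remove disks which were already allocated on the target node for an
# online storage migration that has to be aborted.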
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {

        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

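# Phase 1: lock the VM config, determine the migration type (secure via
# ssh-forwarded UNIX socket vs. insecure via plain TCP) and sync all
# local disks to the target node.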
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    # we use TCP only for insecure migrations, as ssh TCP forward tunnels
    # often appeared too late (they are hard, if not impossible, to check for).
    # secure migrations use UNIX sockets now; this *breaks* compatibility when
    # trying to migrate from new to old, but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');

    my $migration_type = 'secure';
    if (defined($self->{opts}->{migration_type})) {
        $migration_type = $self->{opts}->{migration_type};
    } elsif (defined($datacenterconf->{migration}->{type})) {
        $migration_type = $datacenterconf->{migration}->{type};
    }
    $self->{opts}->{migration_type} = $migration_type;

    sync_disks($self, $vmid);
}

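# Phase 1 cleanup: drop the migrate lock again; volume copies which already
# reached the target node are only reported, not removed.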
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

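# Phase 2: start the VM on the target node in incoming-migration mode,
# parse the migration/NBD/spice endpoints from its output, set up the ssh
# tunnel for secure migrations, start the drive mirror for online storage
# migration, and then run and monitor the actual RAM migration.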
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it via command
    # line parameter); instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    if ($migration_type eq 'secure') {
        $self->log('info', "start remote tunnel");

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready";
                }

                usleep(50000);
            }

        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    }

    my $start = time();

    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
        }
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m); we set a very
    # high default of 8192m, which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576; # MB/s -> bytes/s
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory} * 1048576 / 10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
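    # Poll 'query-migrate' every 2 seconds (0.3s once the remaining RAM drops
    # below the average amount transferred per round) and log progress. If the
    # remaining amount repeatedly grows again or sits at zero without
    # completing, double the allowed downtime so the migration can converge.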
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory} / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if the remaining memory is lower than the
                # average amount transferred per round
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # close the tunnel here to be sure it gets closed on successful migration;
    # on error, phase2_cleanup closes it *after* stopping the waiting remote VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

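# Phase 2 cleanup (only on error): cancel the migration and any block jobs,
# remove already-allocated disks on the target, stop the waiting remote VM
# and close the tunnel.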
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # clean up resources on the target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

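# Phase 3: migration succeeded - remove the local copies of all volumes
# which were copied offline.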
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

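# Phase 3 cleanup: commit mirrored drives to the VM config, move the config
# file to the target node, resume the VM there, wait for the spice server
# migration, then stop the local VM and deactivate its volumes.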
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    if ($self->{storage_migration}) {
        # finish block-job
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            # update the VM config to point at the new (mirrored) volumes
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # remove drives referencing the nbd server from source
            # otherwise vm_stop might hang later on
            foreach my $drive (keys %{$self->{target_drive}}) {
                PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "device_del", id => $drive);
            }
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }
        # config moved and nbd server stopped - now we can resume vm on target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies of the online migrated volumes
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;