package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use POSIX qw(WNOHANG);
use Time::HiRes qw(usleep);

use PVE::AccessControl;
use PVE::Cluster;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Replication;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Storage;
use PVE::Tools;

use PVE::QemuConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer;

use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

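# fork a command (typically the ssh tunnel to the target node) with both its
# stdin and stdout connected to us via pipes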
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

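# close the pipes to the child and reap it, escalating from a graceful wait
# to SIGTERM and finally SIGKILL if it does not exit in time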
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

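# read a single line from the tunnel, with a timeout (default 60s)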
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output;

    return $output;
}

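# send a command over the tunnel; protocol version 1 and up acknowledges every
# command with 'OK', so wait for that reply before returning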
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}

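# spawn the control tunnel to the target node ('qm mtunnel' over ssh),
# forwarding the given local unix sockets, and check the handshake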
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = ();
    foreach my $addr (@$tunnel_addr) {
        push @localtunnelinfo, '-L', $addr;
    }

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

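# tell the tunnel to quit, tear down the child process and remove leftover
# forwarded unix sockets on both nodes (ssh does not clean those up)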
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the forwarded sockets on the local host
        my $cmd = ['rm', '-f', @{$tunnel->{sock_addr}}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

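# preflight checks on the source node: the VM config must be loadable and
# unlocked, all referenced storages must be available on both nodes, local
# volumes get activated, and the ssh connection to the target is verified;
# returns the VM's PID if it is running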
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
    }

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
    if (scalar @$loc_res) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} // $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            next if ($volid =~ m/vm-\d+-cloudinit/);
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [@{$self->{rem_ssh}}, '/bin/true'];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

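# phase 1 work horse: find all local volumes the VM uses and copy them to the
# target node - via replication where a matching job exists, via
# storage_migrate otherwise; volumes attached to a running VM are only
# queued here and mirrored over NBD in phase 2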
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $override_targetsid = $self->{opts}->{targetstorage};

    eval {
        # find local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

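        # scan all enabled, non-shared storages for disks belonging to this
        # VMID - this also catches volumes that are no longer referenced in
        # the config (e.g. orphaned or unused disks)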
        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $override_targetsid // $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volinfo) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';

                # If with_snapshots is not set for storage migrate, it tries to use
                # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
                # back to qcow2 is currently not possible.
                $local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
                $local_volumes->{$volid}->{format} = $volinfo->{format};
            });
        }

        my $replicatable_volumes = PVE::QemuConfig->get_replicatable_volumes(
            $self->{storecfg}, $self->{vmid}, $conf, 0, 1);

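        # classify a single volume referenced by the config or a snapshot,
        # and die if it cannot be migrated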
        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $override_targetsid // $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snaprefs)) {
                $local_volumes->{$volid}->{snapshots} = 1;

                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $local_volumes->{$volid}->{format} eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found $type disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without the 'with-local-disks' option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found $type disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found $type disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|zfspool|lvmthin|lvm)$/;

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'\n";
            }
        }

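        # if a replication job to the target node exists, run it now so only
        # the delta since the last run needs to be transferred; for live
        # migration, dirty bitmaps track writes happening during replication,
        # so the NBD mirror in phase 2 only has to copy changed blocks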
        my $rep_cfg = PVE::ReplicationConfig->new();
        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            if ($self->{running}) {
                my $version = PVE::QemuServer::kvm_user_version();
                if (!min_version($version, 4, 2)) {
                    die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n";
                }

                my $live_replicatable_volumes = {};
                PVE::QemuServer::foreach_drive($conf, sub {
                    my ($ds, $drive) = @_;

                    my $volid = $drive->{file};
                    $live_replicatable_volumes->{$ds} = $volid
                        if defined($replicatable_volumes->{$volid});
                });
                foreach my $drive (keys %$live_replicatable_volumes) {
                    my $volid = $live_replicatable_volumes->{$drive};

                    my $bitmap = "repl_$drive";

                    # start tracking before replication to get full delta + a few duplicates
                    $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
                    mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);

                    # other info comes from target node in phase 2
                    $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
                }
            }
            $self->log('info', "replicating disk images");

            my $start_time = time();
            my $logfunc = sub { $self->log('info', shift) };
            $self->{replicated_volumes} = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
        }

        # sizes in config have to be accurate for remote node to correctly
        # allocate disks, rescan to be sure
        my $volid_hash = PVE::QemuServer::scan_volids($self->{storecfg}, $vmid);
        PVE::QemuServer::foreach_drive($conf, sub {
            my ($key, $drive) = @_;
            my ($updated, $old_size, $new_size) = PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
            if (defined($updated)) {
                $conf->{$key} = PVE::QemuServer::print_drive($updated);
                $self->log('info', "size of disk '$updated->{file}' ($key) updated from $old_size to $new_size\n");
            }
        });

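        # copy the remaining local volumes: disks attached to a running VM are
        # queued for NBD mirroring in phase 2, generated (cloudinit) volumes
        # are recreated on the target and only queued for local deletion, and
        # everything else is transferred offline via storage_migrate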
        $self->log('info', "copying local disk images") if scalar(%$local_volumes);

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $targetsid = $override_targetsid // $sid;
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } elsif ($ref eq 'generated') {
                die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
                # skip all generated volumes but queue them for deletion in phase3_cleanup
                push @{$self->{volumes}}, $volid;
                next;
            } else {
                next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
                my $opts = $self->{opts};
                my $insecure = $opts->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                # use 'migrate' limit for transfer to other node
                my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$targetsid, $sid], $opts->{bwlimit});
                # JSONSchema and get_bandwidth_limit use KiB/s - storage_migrate expects B/s
                $bwlimit = $bwlimit * 1024 if defined($bwlimit);

                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
                    undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

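# remove disks that were already allocated on the target node after a failed
# migration; replicated disks are kept, as they remain valid replica state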
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        # don't clean up replicated disks!
        next if defined($self->{target_drive}->{$target_drive}->{bitmap});

        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

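# remove the dirty bitmaps that were added for replicated volumes when the
# migration is aborted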
sub cleanup_bitmaps {
    my ($self) = @_;
    foreach my $drive (keys %{$self->{target_drive}}) {
        my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
        next if !$bitmap;
        $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
        mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
    }
}

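# phase 1: set the 'migrate' lock in the config and sync all local disks to
# the target node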
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);

    # sync_disks fixes disk sizes to match their actual size, write changes so
    # target allocates correct volumes
    PVE::QemuConfig->write_config($vmid, $conf);
}

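# roll back phase 1: drop the migrate lock, warn about volume copies that may
# already exist on the target, and remove any dirty bitmaps we added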
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }
}

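# phase 2: start the VM on the target node in incoming-migration mode, set up
# the migration tunnel and NBD mirrors for local disks, then start the live
# RAM migration and poll its progress until it completes or fails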
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{online_local_volumes}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;
    my $tunnel_addr = [];
    my $sock_addr = [];
    # version > 0 for unix socket support
    my $nbd_protocol_version = 1;
    # TODO change to 'spice_ticket: <ticket>\n' in 7.0
    my $input = $spice_ticket ? "$spice_ticket\n" : "\n";
    $input .= "nbd_protocol_version: $nbd_protocol_version\n";

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
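    # parse the output of the remote 'qm start' to learn where the target
    # listens: the migration URI (tcp or unix), the SPICE port and one NBD
    # export per mirrored local disk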
    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
            my $drivestr = $4;
            die "Destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            my $nbd_unix_addr = $1;
            my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
            push @$tunnel_addr, "$nbd_unix_addr:$nbd_unix_addr";
            push @$sock_addr, $nbd_unix_addr;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$raddr;

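    # for secure migration, forward the remote unix sockets (or the tcp port
    # for old targets) through the ssh tunnel; for insecure migration a plain
    # tunnel is still forked to send control commands like 'resume' faster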
    $self->log('info', "start remote tunnel");

    if ($migration_type eq 'secure') {

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            push @$tunnel_addr, "$raddr:$raddr";
            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
            push @$sock_addr, $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while ($unix_socket_try <= 100) {
                $unix_socket_try++;
                my $available = 0;
                foreach my $sock (@$sock_addr) {
                    if (-S $sock) {
                        $available++;
                    }
                }

                if ($available == @$sock_addr) {
                    last;
                }

                usleep(50000);
            }
            if ($unix_socket_try > 100) {
                $self->{errors} = 1;
                $self->finish_tunnel($self->{tunnel});
                die "Timeout, migration socket $ruri did not get ready\n";
            }

        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    } else {
        #fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
    $self->{tunnel}->{sock_addr} = $sock_addr if (@$sock_addr);

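    # mirror each local disk to the corresponding NBD export on the target;
    # replicated disks pass their dirty bitmap, so only blocks written since
    # the replication run get copied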
    my $start = time();

    my $opt_bwlimit = $self->{opts}->{bwlimit};

    if (defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $target_drive = PVE::QemuServer::parse_drive($drive, $target->{drivestr});

            my $source_sid = PVE::Storage::Plugin::parse_volume_id($source_drive->{file});
            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target_drive->{file});

            my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);
            my $bitmap = $target->{bitmap};

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
        }
    }

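    # compute the QEMU migration parameters: the bandwidth limit is the lower
    # of the 'migrate' bwlimit (datacenter.cfg/API) and the VM's configured
    # migrate_speed, plus the downtime limit and the xbzrle cache size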
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', undef, $opt_bwlimit) // 0;
    my $migrate_speed = $conf->{migrate_speed} // $bwlimit;
    # migrate_speed is in MB/s, bwlimit in KB/s
    $migrate_speed *= 1024;

    $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;

    # always set migrate speed (overwrites the KVM default of 32m); we set a
    # very high default of 8192m, which is basically unlimited
    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;

    # qmp takes migrate_speed in B/s.
    $migrate_speed *= 1024;
    $self->log('info', "migration speed limit: $migrate_speed B/s");
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        # migrate-set-parameters expects limit in ms
        $migrate_downtime *= 1000;
        $self->log('info', "migration downtime limit: $migrate_downtime ms");
        $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
    }

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "migration cachesize: $cachesize B");
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

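    # tell a connected SPICE client where to reconnect on the target node, so
    # the session survives the migration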
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        mon_cmd($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

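    # poll 'query-migrate' roughly once per second, logging progress; if the
    # remaining RAM repeatedly stops shrinking, double the allowed downtime so
    # the migration can converge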
    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = mon_cmd($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $memory / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer speed
                $usleep = 100000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime * 100) / 100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

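# abort phase 2: cancel the migration, drop the migrate lock, tear down block
# jobs and remote disk copies, stop the VM started on the target and close
# the tunnel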
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { $self->cleanup_bitmaps() };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

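# phase 3: remove the local copies of all volumes that were transferred to
# the target node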
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

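# phase 3 cleanup: complete the storage block jobs, move the config file to
# the target node, resume the VM there, then stop the source VM, deactivate
# its volumes and free the local disk copies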
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block-job with block-job-cancel, to disconnect source VM from NBD
        # to avoid it trying to re-establish it. We are in blockjob ready state,
        # thus, this command changes it to blockjob complete (see qapi docs)
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration: $err\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{drivestr});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # transfer replication state before move config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
            eval {
                $self->write_tunnel($tunnel, 30, "resume $vmid");
            };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        } else {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
            my $logf = sub {
                my $line = shift;
                $self->log('err', $line);
            };
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel) {
        eval { finish_tunnel($self, $tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

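    # give a connected SPICE client up to ~10 seconds to finish moving over to
    # the target before we stop the source VM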
    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            # keep replicated volumes!
            next if $self->{replicated_volumes}->{$volid};

            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'unlock', $vmid];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

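# round up to the next power of two; QEMU requires the xbzrle cache size to
# be a power of two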
sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0] - 1) / log(2));
}

1;