package PVE::QemuMigrate;

use strict;
use warnings;
use POSIX (); # for POSIX::_exit
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use PVE::INotify;
use PVE::Tools;
use PVE::ProcFSTools; # for check_process_running
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::AccessControl; # for spice ticket/certificate helpers
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

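# Fork a helper command with its stdin and stdout connected to us via
# IPC::Open2. If the fork happened but the exec failed, we are running in the
# child, so we log an error and exit immediately instead of returning into the
# caller's stack.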
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

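# Close both ends of a pipe created by fork_command_pipe() and wait up to
# $timeout seconds for the child to exit, escalating from SIGTERM to SIGKILL
# if it keeps running.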
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);
}

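# Start the ssh tunnel to the target node ('qm mtunnel'), optionally forwarding
# a local port to the remote migration port, and wait for the "tunnel online"
# greeting on its stdout.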
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my @localtunnelinfo = $lport ? ('-L', "$lport:localhost:$rport") : ();

    my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

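# Ask the tunnel process to quit via its stdin, then tear down the command
# pipe, allowing it up to 30 seconds to exit cleanly.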
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuServer::lock_config($vmid, $code, @param);
}

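# Preparation: load the VM config and check it is not locked, refuse to
# migrate a running VM without --online, refuse local devices unless --force
# is set, activate the VM volumes and test the ssh connection to the target
# node. Returns the VM's PID if it is running, 0 otherwise.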
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if the VM exists
    my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);

    PVE::QemuServer::check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;
        $self->{forcemachine} = PVE::QemuServer::get_current_qemu_machine($vmid);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

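# Copy all local (non-shared) disk images to the target node. Shared volumes
# are skipped, online migration is refused if any local disk is found, and
# every copied volid is remembered in $self->{volumes} for later cleanup.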
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if (!($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm));

            # if file based, check whether a backing file exists
            if (($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm)) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

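# Phase 1: set the 'migrate' lock in the VM config and sync all local disks
# to the target node.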
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);

    sync_disks($self, $vmid);
}

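# Phase 1 failed: remove the migrate lock again and report volume copies that
# may have been left behind on the target node.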
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

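# Phase 2: start the VM on the target node in incoming-migration mode, open
# the ssh tunnel, apply the migration parameters (speed, downtime, cache size,
# spice) and run the live migration, polling QEMU until it finishes or fails.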
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename;

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it via a command
    # line parameter); instead we pipe it through STDIN
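    # The remote 'qm start' reports its listening sockets on stdout; the
    # callback below parses lines of the form:
    #   migration listens on tcp:[<address-or-localhost>]:<port>
    #   migration listens on port <port>
    #   spice listens on port <port>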
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:\[([\d\.:a-fA-F]+|localhost)\]:(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    ## create tunnel to remote port
    $self->log('info', "starting ssh migration tunnel");
    my $pfamily = PVE::Tools::get_host_address_family($nodename);
    my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port($pfamily) : undef;
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    my $start = time();
    $self->log('info', "starting online/live migration on $raddr:$rport");
    $self->{livemigration} = 1;

    # load defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m);
    # we set a very high default of 8192m, which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576; # MB/s -> bytes/s
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:[$raddr]:$rport");
    };
    my $merr = $@;
    $self->log('info', "migrate uri => tcp:[$raddr]:$rport failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
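    # Poll 'query-migrate' until QEMU reports completion or failure: the poll
    # interval is shortened once the remaining RAM drops below the average
    # amount transferred per iteration, and the allowed downtime is doubled
    # once the remaining amount repeatedly grows (or reaches zero) between
    # polls.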
    while (1) {
        $i++;
        # average bytes transferred per iteration so far
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if ($stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average amount transferred
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

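# Phase 2 hit errors: cancel the migration, remove the migrate lock and stop
# the partially started VM on the target node.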
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }
}

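# Phase 3: the migration succeeded - remove the local copies of all disks
# that were synced to the target node.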
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

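# Phase 3 cleanup: move the config file to the target node, resume the VM
# there (live migration), wait for the spice server to finish migrating, stop
# the local VM, close the tunnel, deactivate local volumes and clear the
# migrate lock on the target node.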
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuServer::config_file($vmid);
    my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file has been moved, we can resume the VM on the target node
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {
                my $line = shift;
                $self->log('err', $line);
            });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;