PVE/QemuMigrate.pm (qemu-server.git, git.proxmox.com) - migrate: check if storage is available
package PVE::QemuMigrate;

use strict;
use warnings;
use POSIX ();
use IO::File;
use IPC::Open2;
use Time::HiRes qw( usleep );
use PVE::AbstractMigrate;
use PVE::INotify;
use PVE::Tools;
use PVE::ProcFSTools;
use PVE::Cluster;
use PVE::Storage;
use PVE::AccessControl;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

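# Live/offline migration of a VM to another cluster node.
#
# The flow is driven by the PVE::AbstractMigrate base class, which (roughly)
# calls the methods below in this order: prepare(), phase1() (copy local disk
# images), phase2() (live RAM migration, only for a running VM), phase3() and
# phase3_cleanup(); the *_cleanup() methods run when the corresponding phase
# fails. All communication with the target node goes through the ssh command
# stored in $self->{rem_ssh}.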
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

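# Close both ends of a command pipe created by fork_command_pipe() and wait up
# to $timeout seconds for the child to exit on its own; after that escalate to
# SIGTERM and, another 10 seconds later, SIGKILL.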
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);
}

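# Open the ssh migration tunnel to the target node by running 'qm mtunnel'
# there. When $lport is given, an additional '-L $lport:localhost:$rport'
# forward is added, so the migration socket listening on localhost on the
# target becomes reachable via localhost on this node. The resulting command
# looks roughly like (a sketch; the exact options come from $self->{rem_ssh}):
#
#   ssh root@<target-ip> -L <lport>:localhost:<rport> qm mtunnel
#
# The remote side is expected to answer with a single "tunnel online" line
# within 60 seconds; anything else is treated as an error.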
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my @localtunnelinfo = $lport ? ('-L', "$lport:localhost:$rport") : ();

    my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

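# Ask the mtunnel process on the target to exit by sending "quit" on its
# stdin, then tear down the pipe with finish_command_pipe(). A write timeout
# is re-thrown after the pipe has been cleaned up.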
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

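# Called first by the migration framework. Checks that the VM config exists
# and is not locked, that a running VM was started with --online, that no
# non-migratable local resources (e.g. passed-through devices) are configured
# unless --force is set, that every referenced storage is available on both
# source and target node, and that the target is reachable over ssh with
# public key authentication. Returns the PID of the running VM, or 0 for a
# stopped VM.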
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if the VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
    }

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

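# Copy all local (non-shared) disk images to the target node before the VM is
# moved. Volumes are collected from two sources: vdisk_list() on every enabled
# non-shared storage (this also catches unused volumes owned by the VM) and
# foreach_volid() over the VM config. Local block/file paths, local cdroms and
# volumes owned by another VM are rejected, and an online migration with local
# disks is refused. The actual copy is done per volume via
# PVE::Storage::storage_migrate() to $self->{nodeip}.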
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on the target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have them all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if (!($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm));

            # if file-based, check whether a backing file (linked clone) exists
            if (($scfg->{type} eq 'dir') && (!$sharedvm)) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'\n" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

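# Phase 1: mark the VM config with the 'migrate' lock and copy local disk
# images to the target node (see sync_disks above).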
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

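# Runs when phase 1 fails: drop the 'migrate' lock again and report any
# volume copies that may have been left behind on the target node.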
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

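# Phase 2: live migration of a running VM.
#
# The VM is started on the target node in incoming-migration mode via
# 'qm start ... --stateuri tcp --migratedfrom <node>'; its output tells us
# where the migration socket listens, e.g. (a sketch of the lines the regexes
# below parse, ports are examples):
#
#   migration listens on tcp:localhost:60000
#   spice listens on port 61000
#
# We then open an ssh tunnel to that port, tune migration parameters (speed,
# expected downtime, capabilities, xbzrle cache size), optionally prepare the
# SPICE client switch-over, issue the QMP 'migrate' command and poll
# 'query-migrate' until it reports completed, failed or cancelled. If RAM is
# dirtied faster than it is transferred, the allowed downtime is doubled every
# few iterations to help the migration converge.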
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename;

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it as a command line
    # parameter) - instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    ## create tunnel to remote port
    $self->log('info', "starting ssh migration tunnel");
    my $pfamily = PVE::Tools::get_host_address_family($nodename);
    my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port($pfamily) : undef;
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    my $start = time();
    $self->log('info', "starting online/live migration on $raddr:$rport");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set the migrate speed (overwrite the KVM default of 32 MB/s)
    # we set a very high default of 8192 MB/s, which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set the migrate cache size to 10% of the configured memory
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                                                hostname => $proxyticket, 'tls-port' => $spice_port,
                                                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport");
    };
    my $merr = $@;
    $self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if ($stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;

                # reduce the sleep interval if the remaining memory is lower than
                # the average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # make sure the tunnel is closed
    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

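# Runs when phase 2 fails: cancel the migration, drop the 'migrate' lock,
# stop the half-started VM on the target node and close the ssh tunnel.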
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # clean up resources on the target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

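# Phase 3: remove the local copies of all disk images that were copied to the
# target node in phase 1.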
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

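# Final hand-over: move the VM config file into the target node's directory
# (which transfers ownership within the cluster file system), resume the VM on
# the target after a live migration, wait for the SPICE client to follow, stop
# the now stale local VM, deactivate its volumes and finally clear the
# 'migrate' lock on the target node.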
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file has been moved, we can resume the VM on the target node
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop the local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoids LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear the migrate lock on the target node
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;