package PVE::QemuMigrate;

use strict;
use warnings;
use POSIX ();
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use PVE::INotify;
use PVE::Tools;
use PVE::ProcFSTools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::AccessControl;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

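# PVE::QemuMigrate - migrate a QEMU/KVM guest to another cluster node.
#
# The work is driven by PVE::AbstractMigrate, which calls the methods below in
# order: prepare(), phase1()/phase1_cleanup() (offline disk sync), phase2()/
# phase2_cleanup() (live RAM migration over an ssh tunnel), and phase3()/
# phase3_cleanup() (remove local disk copies, move the config file, stop the
# source VM). Typically reached via 'qm migrate <vmid> <target> [--online]'.

# fork_command_pipe() runs a remote command (here: 'qm mtunnel' via ssh) with
# IPC::Open2 and returns the reader/writer handles together with the child pid.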
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
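
# finish_command_pipe() closes both ends of a command pipe and waits up to
# $timeout seconds for the child to exit; if it keeps running it escalates
# to SIGTERM and finally SIGKILL.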
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);
}
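
# fork_tunnel() starts the migration tunnel: it runs 'qm mtunnel' on the target
# node over ssh, optionally forwarding a local port to the remote migration
# port, and waits up to 60 seconds for the 'tunnel online' greeting.
#
# Illustrative resulting command line (ports are examples only, the ssh part
# comes from $self->{rem_ssh}):
#
#   ssh root@<targetnode> -L 60000:localhost:60000 qm mtunnel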
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my @localtunnelinfo = $lport ? ('-L', "$lport:localhost:$rport") : ();

    my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}
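
# finish_tunnel() asks the remote mtunnel process to quit (with a 30 second
# timeout) and then tears the command pipe down via finish_command_pipe().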
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}
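
# lock_vm() runs $code while holding the configuration lock of $vmid.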
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuServer::lock_config($vmid, $code, @param);
}
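
# prepare() runs before phase1: it loads and lock-checks the VM configuration,
# detects whether the VM is running (which requires --online), refuses to
# migrate a VM that uses local resources unless migration is forced, activates
# all volumes and tests the ssh connection to the target node. Returns the VM
# pid if the guest is running, 0 otherwise.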
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if the VM exists
    my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);

    PVE::QemuServer::check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
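
# sync_disks() collects all volumes that live on local (non-shared) storage and
# are owned by the VM, refuses an online migration if any such volume exists,
# performs some sanity checks (storage type, backing files/clones) and finally
# copies the volumes to the target node with PVE::Storage::storage_migrate().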
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have them all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && !$sharedvm;

            # if file based, check whether a backing file (clone) exists
            if ($scfg->{type} eq 'dir' && !$sharedvm) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'\n" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}
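
# phase1() runs while the VM is still on the source node: it sets the
# 'migrate' lock in the VM configuration and copies any local disks to the
# target node via sync_disks().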
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuServer::write_config($vmid, $conf, 1);

    sync_disks($self, $vmid);
}
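
# phase1_cleanup() is called when phase1 failed: it drops the 'migrate' lock
# again and reports any disk copies that may have been left on the target node.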
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::write_config($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
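
# phase2() performs the actual live migration: it starts the VM on the target
# node in migration mode, parses the migration (and optional SPICE) listen
# address from the remote command output, opens an ssh tunnel if the target
# only listens on localhost, tunes migration speed, downtime and xbzrle cache
# size, issues the QMP 'migrate' command and then polls 'query-migrate' until
# the migration completes, fails or is cancelled.
#
# Illustrative remote start command (the ssh prefix comes from $self->{rem_ssh}):
#
#   qm start <vmid> --stateuri tcp --skiplock --migratedfrom <sourcenode> [--machine <type>]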
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename;

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });
355 die "unable to detect remote migration address\n" if !$raddr;
356
357 ## create tunnel to remote port
358 $self->log('info', "starting ssh migration tunnel");
359 my $pfamily = PVE::Tools::get_host_address_family($nodename);
360 my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port($pfamily) : undef;
361 $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);
362
363 my $start = time();
364 $self->log('info', "starting online/live migration on $raddr:$rport");
365 $self->{livemigration} = 1;
366
367 # load_defaults
368 my $defaults = PVE::QemuServer::load_defaults();
369
370 # always set migrate speed (overwrite kvm default of 32m)
371 # we set a very hight default of 8192m which is basically unlimited
372 my $migrate_speed = $defaults->{migrate_speed} || 8192;
373 $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
374 $migrate_speed = $migrate_speed * 1048576;
375 $self->log('info', "migrate_set_speed: $migrate_speed");
376 eval {
377 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
378 };
379 $self->log('info', "migrate_set_speed error: $@") if $@;
380
381 my $migrate_downtime = $defaults->{migrate_downtime};
382 $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
383 if (defined($migrate_downtime)) {
384 $self->log('info', "migrate_set_downtime: $migrate_downtime");
385 eval {
386 PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
387 };
388 $self->log('info', "migrate_set_downtime error: $@") if $@;
389 }
390
391 eval {
392 PVE::QemuServer::set_migration_caps($vmid);
393 };
394 warn $@ if $@;
395
    # set the cache size to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport");
    };
    my $merr = $@;
    $self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : undef;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if ($stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce the sleep time if the remaining memory is lower than the average amount transferred
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if ($xbzrlecachesize) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # make sure the tunnel is closed
    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
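
# phase2_cleanup() runs if phase2 failed: it cancels the migration, removes the
# 'migrate' lock from the config, stops the partially started VM on the target
# node and closes the ssh tunnel.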
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::write_config($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # clean up resources on the target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
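
# phase3() removes the local copies of all volumes that were migrated to the
# target node in phase1.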
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
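
# phase3_cleanup() finishes the migration: it moves the config file to the
# target node, resumes the VM there (live migration), waits for the SPICE
# client migration if applicable, stops and deactivates the VM on the source
# node and finally clears the 'migrate' lock on the target.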
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuServer::config_file($vmid);
    my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file has been moved, we can resume the VM on the target node
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoids LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;