package PVE::QemuMigrate;

use strict;
use warnings;
use POSIX;
use JSON;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use PVE::INotify;
use PVE::Tools;
use PVE::ProcFSTools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use Time::HiRes qw( usleep );

use base qw(PVE::AbstractMigrate);

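# Live/offline migration of a VM to another cluster node. The driver
# lives in the PVE::AbstractMigrate base class, which (roughly) calls
# the methods below in this order:
#
#   prepare()         - sanity checks; returns whether the VM is running
#   phase1()          - set the 'migrate' lock, sync local disk images
#   phase1_cleanup()  - undo phase1 on error
#   phase2()          - online only: start the VM on the target and
#                       stream its RAM through an ssh tunnel
#   phase2_cleanup()  - undo phase2 on error
#   phase3()          - remove local disk copies
#   phase3_cleanup()  - move the config file, resume the target VM,
#                       stop the local VM, close the tunnel
#   final_cleanup()   - currently nothing to do

# Run $cmd (an array ref) via open2() and return a hash with the child
# pid and its reader/writer handles. Used to drive 'qm mtunnel'.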
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors - if open2() died after the fork() we are still
    # inside the child, so exit immediately (the kill is just a safety net)
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

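# Close the pipe to the child and wait up to $timeout seconds for it to
# exit on its own; after that escalate to SIGTERM and finally SIGKILL.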
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);
}

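# Open the migration tunnel: ssh to the target node with a local port
# forward and run 'qm mtunnel' there. The effective command is roughly
# (a sketch - the exact ssh invocation comes from $self->{rem_ssh}):
#
#   ssh <target> -L $lport:localhost:$rport qm mtunnel
#
# 'qm mtunnel' replies "tunnel online" on stdout once it is ready.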
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my $cmd = [@{$self->{rem_ssh}}, '-L', "$lport:localhost:$rport",
               'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

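# Ask the tunnel process to quit ('quit\n' on its stdin), then tear the
# pipe down via finish_command_pipe() with a 30 second grace period.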
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}

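# Run $code while holding the config file lock for $vmid.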
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuServer::lock_config($vmid, $code, @param);
}

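# Pre-flight checks before any state is touched: the VM config must be
# loadable and unlocked, a running VM requires --online, local devices
# require --force, and the target node must be reachable via ssh.
# Returns the VM pid if it is running, 0 otherwise.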
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);

    PVE::QemuServer::check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

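# Copy all local (non-shared) disk images to the target node with
# PVE::Storage::storage_migrate(). Volumes are collected from two
# sources: a storage scan (catches unused volumes) and the VM config
# itself. Online migration is refused if any local disk is found, and
# only 'dir' type storages without backing files are supported.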
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # additionally add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if $scfg->{type} ne 'dir';

            # for file-based volumes, check if a backing file exists
            if (($scfg->{type} eq 'dir') && (!$sharedvm)) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'\n" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

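# Phase 1: mark the VM config with the 'migrate' lock, then copy all
# local disk images to the target node.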
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);

    sync_disks($self, $vmid);
}

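# Phase 1 error path: drop the 'migrate' lock again and report (but do
# not yet remove) any volume copies already created on the target.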
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

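# Phase 2 (online migration only): start the VM paused on the target via
# 'qm start ... --stateuri tcp', parse the migration port from its
# output, tunnel that port over ssh, tune speed/downtime/cache size via
# the monitor, issue 'migrate' and poll 'query-migrate' until it
# completes or fails. If the remaining RAM stops shrinking for several
# iterations, the allowed downtime is doubled so the migration can
# converge.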
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $rport;

    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'start',
               $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename];

    PVE::Tools::run_command($cmd, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on port (\d+)$/) {
            $rport = $1;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration port\n" if !$rport;

    $self->log('info', "starting migration tunnel");

    ## create tunnel to remote port
    my $lport = PVE::Tools::next_migrate_port();
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    $self->log('info', "starting online/live migration on port $lport");
    # start migration

    my $start = time();

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576; # MB/s -> bytes/s
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    # disable the xbzrle capability (errors are deliberately ignored)
    my $capabilities = {};
    $capabilities->{capability} = "xbzrle";
    $capabilities->{state} = JSON::false;

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-capabilities", capabilities => [$capabilities]);
    };

    # set cache size to 10% of the configured VM memory
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:localhost:$lport");
    };
    my $merr = $@;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }
        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep time if the remaining memory is lower than
                # the average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                #$self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

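# Phase 2 error path: cancel the running migration, drop the 'migrate'
# lock, and stop the (paused) VM that phase2 started on the target node.
# Sets $self->{phase2errors} so phase3/phase3_cleanup become no-ops.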
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }
}

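# Phase 3: the VM now runs on the target, so remove the local copies of
# all volumes that were synced over in phase 1.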
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

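# Final hand-over: move the config file to the target node (a plain
# rename() inside the clustered configuration filesystem), resume the
# VM there for live migrations, stop the local VM, close the tunnel,
# deactivate local volumes and clear the 'migrate' lock on the target.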
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuServer::config_file($vmid);
    my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    # now that the config file is moved, we can resume the VM on the
    # target if this is a live migration
    if ($self->{tunnel}) {
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
        eval { PVE::Tools::run_command($cmd, outfunc => sub {},
            errfunc => sub {
                my $line = shift;
                $self->log('err', $line);
            });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;