forbid offline migration of a non-shared volume if it's a clone
[qemu-server.git] / PVE / QemuMigrate.pm
package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw();        # POSIX::_exit() is called in fork_command_pipe()
use JSON;              # JSON::false is used when setting migration capabilities
use PVE::Tools;        # run_with_timeout(), run_command()
use PVE::ProcFSTools;  # check_process_running()
use PVE::INotify;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use Time::HiRes qw( usleep );

use base qw(PVE::AbstractMigrate);

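# Fork a helper process connected via a bidirectional pipe (IPC::Open2).
# Returns a hash with the reader/writer handles and the child pid; dies
# if the command cannot be started.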
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

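# Close both ends of a command pipe and wait up to $timeout seconds for the
# child to exit; escalates to SIGTERM and finally SIGKILL if it keeps running.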
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;
}

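# Open the ssh tunnel to the target node ('qm mtunnel'), forwarding the local
# migration port to the remote one, and wait for its "tunnel online" greeting.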
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my $cmd = [@{$self->{rem_ssh}}, '-L', "$lport:localhost:$rport",
               'qm', 'mtunnel' ];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

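# Ask the ssh tunnel to quit, then tear down the command pipe.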
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}

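# Run $code with the VM configuration locked.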
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuServer::lock_config($vmid, $code, @param);
}

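# Pre-flight checks before migration: load and lock-check the VM config,
# refuse a running VM without --online, refuse local devices (unless forced),
# activate all volumes and verify the ssh connection to the target node.
# Returns the pid of the running VM, or 0 if it is stopped.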
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # check if the VM exists
    my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);

    PVE::QemuServer::check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

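# Copy all local (non-shared) disk images to the target node. Collects the
# volumes from the storage backends and from the VM config, rejects setups
# that cannot be migrated (local cdroms, volumes owned by another VM, online
# migration with local disks, storage types other than 'dir', clones with a
# backing file) and finally calls PVE::Storage::storage_migrate() per volume.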
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    my $res = [];

    eval {

        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if $scfg->{type} ne 'dir';

            # if file based, check whether a backing file (clone parent) exists
            if (($scfg->{type} eq 'dir') && (!$sharedvm)) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'\n" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

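# Phase 1: set the 'migrate' lock in the VM config and copy local disks.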
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);

    sync_disks($self, $vmid);
}

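# Phase 1 cleanup: drop the migrate lock again and report any volume copies
# that may already exist on the target node.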
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

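# Phase 2 (live migration): start the VM on the target node, open the ssh
# tunnel, tune migration parameters (speed, downtime, xbzrle, cache size),
# start the QEMU live migration and poll 'query-migrate' until it finishes.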
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $rport;

    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'start',
               $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename];

    PVE::Tools::run_command($cmd, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on port (\d+)$/) {
            $rport = $1;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration port\n" if !$rport;

    $self->log('info', "starting migration tunnel");

    ## create tunnel to remote port
    my $lport = PVE::QemuServer::next_migrate_port();
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    $self->log('info', "starting online/live migration on port $lport");
    # start migration

    my $start = time();

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    # explicitly disable the xbzrle capability
    my $capabilities = {};
    $capabilities->{capability} = "xbzrle";
    $capabilities->{state} = JSON::false;

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-capabilities", capabilities => [$capabilities]);
    };

    # set cache size to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:localhost:$lport");
    };
    my $merr = $@;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }
        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                #$self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");

                # count rounds where remaining memory grew again (or reached 0);
                # after a few such rounds double the allowed downtime so the
                # migration can converge
                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

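# Phase 2 cleanup: cancel the migration, drop the migrate lock and stop the
# (partially started) VM on the target node again.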
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }
}

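# Phase 3: the VM now runs on the target node - remove the local disk copies.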
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

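# Phase 3 cleanup: move the config file to the target node, resume the VM
# there (live migration), stop the local VM, close the tunnel, deactivate
# local volumes and finally clear the migrate lock on the target.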
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuServer::config_file($vmid);
    my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    # now that the config file is moved, we can resume the VM on the target if this is a live migration
    if ($self->{tunnel}) {
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
        eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always deactivate volumes - avoids having LVM LVs active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;