package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use POSIX;
use Time::HiRes qw( usleep );

use PVE::AbstractMigrate;
use PVE::INotify;
use PVE::Tools;
use PVE::ProcFSTools;
use PVE::Storage;
use PVE::AccessControl;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);
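
# Run a command (typically ssh to the target node) connected to us through a
# bidirectional pipe; returns the reader/writer handles and the child pid.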
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };
    my $err = $@;

    # open2() may fork and then fail to exec - in that case we are running
    # inside the child, so just log the error and exit immediately
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
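
# Shut down a pipe created by fork_command_pipe(): wait up to $timeout seconds
# for the child to exit by itself, then escalate to SIGTERM and finally SIGKILL.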
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    for (my $i = 0; $i < 10; $i++) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
}
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my @localtunnelinfo = $lport ? ('-L', "$lport:localhost:$rport") : ();

    my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo, 'qm', 'mtunnel' ];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }

    return $tunnel;
}
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
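
# Preparation step: load and sanity-check the VM configuration, refuse to
# migrate a running VM unless --online was given, activate its volumes and
# verify the ssh connection to the target node.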
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    eval {
        my $volhash = {};
        my $cdromhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if (!($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm));

            # if file based, check whether a backing file (linked clone) exists
            if (($scfg->{type} eq 'dir' || $scfg->{type} eq 'zfspool') && (!$sharedvm)) {
                my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1);
                die "can't migrate '$volid' as it's a clone of '$parent'" if $parent;
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}
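
# Phase 1 error path: remove the migrate lock again and report any volume
# copies that may have been left behind on the target node.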
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
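
# Phase 2 (online migration): start the VM on the target node, open the ssh
# tunnel, tune migration speed/downtime, then start the QEMU migration and
# poll its status until it completes or fails.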
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename;

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    ## create tunnel to remote port
    $self->log('info', "starting ssh migration tunnel");
    my $pfamily = PVE::Tools::get_host_address_family($nodename);
    my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port($pfamily) : undef;
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    my $start = time();
    $self->log('info', "starting online/live migration on $raddr:$rport");
    $self->{livemigration} = 1;

    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;  # MB/s -> bytes/s
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    PVE::QemuServer::set_migration_caps($vmid);

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory} * 1048576 / 10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport");
    };
    my $merr = $@;
    $self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;

    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if ($stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;

                # reduce sleep if remaining memory is lower than the average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                # if the migration does not converge, double the allowed downtime
                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # to be sure that the tunnel is closed
    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
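
# Phase 2 error path: cancel the running migration, drop the lock, stop the
# half-started VM on the target node and close the tunnel.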
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
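
# Phase 3: the VM now runs on the target node, so the local copies of the
# synced volumes can be removed.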
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
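
# Phase 3 finalization: move the config file to the target node, resume the
# VM there for live migrations, stop the local VM and clear the migrate lock.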
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file is moved, we can resume the vm on the target if live migrating
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
        $self->log('info', "Waiting for spice server migration");
        eval {
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                usleep(100000);
            }
        };
    }

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock on the target node
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;