package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::AccessControl;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);

sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill('TERM', $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill('KILL', $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the socket file on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    $self->{volumes} = [];

    eval {
        my $volhash = {};

        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $volhash->{$volid} = 1;
            });
        }

        my $test_volid = sub {
            my ($volid, $is_cdrom, $snapname) = @_;

            return if !$volid;

            die "can't migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "can't migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            die "can't migrate local cdrom '$volid'\n" if $is_cdrom;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snapname)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' and 'qcow2' files (on directory storage)

                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);

                if (($scfg->{type} eq 'zfspool') || ($format eq 'qcow2')) {
                    $volhash->{$volid} = 1;
                    return;
                }

                die "can't migrate snapshot of local volume '$volid'\n";

            } else {
                $volhash->{$volid} = 1;
            }
        };

        my $test_drive = sub {
            my ($ds, $drive, $snapname) = @_;

            &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
        };

        PVE::QemuServer::foreach_drive($conf, $test_drive);
        foreach my $snapname (keys %{$conf->{snapshots}}) {
            &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
                if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
            PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
        }

        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}

sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    # We use TCP only for insecure migrations, as TCP ssh forward tunnels
    # often appeared too late (they are hard, if not impossible, to check for).
    # Secure migrations use UNIX sockets now; this *breaks* compatibility when
    # migrating from new to old, but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
    my $secure_migration = ($datacenterconf->{migration_unsecure}) ? 0 : 1;

    if (!$secure_migration) {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it via command
    # line parameter); instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    if ($secure_migration) {
        $self->log('info', "start remote tunnel");

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready";
                }

                usleep(50000);
            }

        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    }

    my $start = time();
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce the poll interval if the remaining memory is lower
                # than the average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # just to be sure that the tunnel gets closed on successful migration; on
    # error, phase2_cleanup closes it *after* stopping the remote waiting VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file is moved, we can resume the VM on the target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {},
                errfunc => sub {
                    my $line = shift;
                    $self->log('err', $line);
                });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;