package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;

use base qw(PVE::AbstractMigrate);
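# This module implements QEMU/KVM migration on top of PVE::AbstractMigrate.
# The base class is expected to drive the workflow through the methods below
# (prepare, phase1..phase3 and their *_cleanup counterparts, final_cleanup);
# the exact call order is determined by PVE::AbstractMigrate.

# fork_command_pipe: fork a command with a bidirectional pipe (IPC::Open2)
# and return a hash with the reader/writer handles and the child PID. If the
# exec fails after the fork, the child must not return into the caller's
# stack, so it exits immediately via POSIX::_exit().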
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
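# finish_command_pipe: close the pipe handles and collect the child process.
# Wait up to $timeout seconds for a normal exit, then escalate to SIGTERM and
# finally SIGKILL before giving up.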
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}
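# fork_tunnel: open the migration tunnel by running 'qm mtunnel' on the
# target node over ssh, optionally forwarding a local port or socket (-L).
# The remote side is expected to answer with "tunnel online" within 60
# seconds; any other reply (e.g. "no quorum") aborts the migration.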
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, 'qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $reader = $tunnel->{reader};

    my $helo;
    eval {
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}
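# finish_tunnel: ask the mtunnel process to quit, close the command pipe and
# remove a leftover forwarded UNIX socket on both the local and the remote
# node (ssh does not clean those up by itself).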
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}
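# prepare: sanity checks before the migration starts - the VM must exist and
# be unlocked, storages are checked on source and target node, non-shared
# volumes are activated and the ssh connection to the target is tested.
# Returns the PID of the running VM, or 0 if it is stopped.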
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
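# sync_disks: scan all storages and the VM configuration (including
# snapshots) for local volumes, verify that they can be migrated, and copy
# them to the target node via PVE::Storage::storage_migrate(). Volumes that
# will be mirrored online over NBD are only collected in
# $self->{online_local_volumes} here.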
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $res = [];

    eval {
        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid} = 'storage';
            });
        }
        my $test_volid = sub {
            my ($volid, $is_cdrom, $snapname) = @_;

            return if !$volid;

            if ($volid =~ m|^/|) {
                $local_volumes->{$volid} = 'config';
                die "local file/device\n";
            }

            if ($is_cdrom) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    $msg .= " (referenced in snapshot '$snapname')"
                        if defined($snapname);

                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid} = defined($snapname) ? 'snapshot' : 'config';

            die "local cdrom image\n" if $is_cdrom;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            if (defined($snapname)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };
        my $test_drive = sub {
            my ($ds, $drive, $snapname) = @_;

            eval {
                &$test_volid($drive->{file}, PVE::QemuServer::drive_is_cdrom($drive), $snapname);
            };

            &$log_error($@, $drive->{file}) if $@;
        };

        foreach my $snapname (keys %{$conf->{snapshots}}) {
            eval {
                &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, 0, undef)
                    if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
            };
            &$log_error($@, $conf->{snapshots}->{$snapname}->{'vmstate'}) if $@;

            PVE::QemuServer::foreach_drive($conf->{snapshots}->{$snapname}, $test_drive, $snapname);
        }
        PVE::QemuServer::foreach_drive($conf, $test_drive);

        foreach my $vol (sort keys %$local_volumes) {
            if ($local_volumes->{$vol} eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($local_volumes->{$vol} eq 'config') {
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($local_volumes->{$vol} eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use the same storage ID on the remote side
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate it
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        $self->log('info', "copying disk images");

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                push @{$self->{volumes}}, $volid;
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}
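# cleanup_remotedisks: remove disks already allocated on the target node via
# 'pvesm free' (used when an online storage migration has to be aborted).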
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
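# phase1: set the 'migrate' lock in the VM configuration and copy the local
# disks to the target node.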
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}
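# phase1_cleanup: remove the migrate lock again and report volume copies
# which may have been left behind on the target node.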
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
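# phase2: start the VM on the target node in incoming-migration mode, set up
# the ssh tunnel (for secure migrations) and drive mirroring (for storage
# migration), tune speed, downtime and cache size, then poll 'query-migrate'
# until the RAM migration completes or fails.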
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    # we use TCP only for insecure migrations, as TCP ssh forward tunnels often
    # appeared too late (they are hard, if not impossible, to check for).
    # Secure migrations use UNIX sockets now; this *breaks* compatibility when
    # trying to migrate from new to old, but *not* from old to new.
    my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');

    my $migration_type = 'secure';
    if (defined($self->{opts}->{migration_type})) {
        $migration_type = $self->{opts}->{migration_type};
    } elsif (defined($datacenterconf->{migration}->{type})) {
        $migration_type = $datacenterconf->{migration}->{type};
    }

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }
    my $spice_port;

    # Note: we try to keep $spice_ticket secret (do not pass it as a command
    # line parameter); instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;
    if ($migration_type eq 'secure') {
        $self->log('info', "start remote tunnel");

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "Timeout, migration socket $ruri did not get ready";
                }

                usleep(50000);
            }
        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    }
    my $start = time();

    if ($self->{opts}->{targetstorage}) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "the number of destination local disks does not match the number of source local disks"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});

        foreach my $drive (keys %{$self->{target_drive}}) {
            $self->log('info', "$drive: start migration to $self->{target_drive}->{$drive}->{nbd_uri}");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $self->{target_drive}->{$drive}->{nbd_uri}, $vmid, undef, $self->{storage_migration_jobs}, 1);
        }
    }
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # always set migrate speed (overwrite kvm default of 32m)
    # we set a very high default of 8192m which is basically unlimited
    my $migrate_speed = $defaults->{migrate_speed} || 8192;
    $migrate_speed = $conf->{migrate_speed} || $migrate_speed;
    $migrate_speed = $migrate_speed * 1048576;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set the cache size to 10% of the total memory
    my $cachesize = int($conf->{memory}*1048576/10);
    $self->log('info', "set cachesize: $cachesize");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }
    $self->log('info', "start migrate command to $ruri");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
    my $lstat = 0;
    my $usleep = 2000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat/$i if $lstat;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce the sleep time if the remaining memory is lower than the average amount transferred
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }
                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }

    # just to be sure that the tunnel gets closed on successful migration; on
    # error phase2_cleanup closes it *after* stopping the remote waiting VM
    if (!$self->{errors} && $self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
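# phase2_cleanup: only runs if errors occurred - cancel the migration and any
# running block jobs, remove the migrate lock, stop the partially started VM
# on the target node and tear down the tunnel.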
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}
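# phase3: after a successful migration, remove the local copies of all disks
# that were copied offline to the target node.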
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
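# phase3_cleanup: complete pending drive-mirror jobs, move the VM
# configuration file to the target node, resume the VM there (for live
# migration), wait for the SPICE client hand-over, stop and deactivate
# everything locally, and finally clear the migrate lock on the target node.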
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); }; # finish block jobs

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }
    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    if ($self->{livemigration}) {
        # now that the config file has been moved, we can resume the VM on the target
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
        eval {
            PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {
                my $line = shift;
                $self->log('err', $line);
            });
        };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }

        # stop the nbd server on the remote VM
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;