package PVE::QemuMigrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use PVE::QemuConfig;
use PVE::AccessControl;
use Time::HiRes qw( usleep );
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Replication;

use base qw(PVE::AbstractMigrate);

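# Fork a helper process with a bidirectional pipe (used below for the
# ssh migration tunnel). Returns a hash with reader/writer handles and
# the child pid.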
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$); # not reached - POSIX::_exit() does not return
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

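# Close the pipe handles and collect the child process. Escalates from
# waiting (up to $timeout seconds) to SIGTERM and finally SIGKILL if the
# child does not exit.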
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid}; # 'pid' is the key set by fork_command_pipe
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

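# Read a single line from the tunnel, guarded by a timeout (60s default).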
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output if defined($output); # EOF yields undef

    return $output;
}

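# Send a command line over the tunnel. For tunnel protocol version >= 1
# the remote end acknowledges every command with 'OK'.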
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}

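# Spawn the ssh tunnel to the target node ('qm mtunnel'), optionally with
# a port/socket forward (-L), and wait for the 'tunnel online' handshake
# plus the protocol version announcement.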
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

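# Ask the remote end to quit, tear down the ssh process and remove a
# leftover forwarded unix socket on both sides.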
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up the forwarded socket on the local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

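# Run $code while holding the config lock of $vmid.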
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

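# Migration sanity checks before anything is touched: VM config lock,
# --online requirement for running VMs, local resources, storage
# availability on both nodes and the ssh connection to the target.
# Returns the PID if the VM is running, 0 otherwise.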
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::qemu_machine_pxe($vmid, $conf);
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

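# Scan all local (non-shared) storages and the VM config for local
# volumes, abort on non-migratable ones and copy the remaining offline
# volumes to the target node (replicated volumes are synced via the
# replication machinery instead).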
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    eval {

        # find local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            die "local cdrom image\n" if $attr->{cdrom};

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
            $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
            if (defined($snaprefs)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
            $self->{opts}->{targetstorage} = 1; # use same sid for remote local
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        my $rep_volumes;

        $self->log('info', "copying disk images");

        my $rep_cfg = PVE::ReplicationConfig->new();

        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            die "can't live migrate VM with replicated volumes\n" if $self->{running};
            my $start_time = time();
            my $logfunc = sub { my ($msg) = @_; $self->log('info', $msg); };
            $rep_volumes = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
            $self->{replicated_volumes} = $rep_volumes;
        }

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid}->{ref} eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } else {
                next if $rep_volumes->{$volid};
                push @{$self->{volumes}}, $volid;
                my $insecure = $self->{opts}->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
                    undef, undef, undef, undef, $insecure, $with_snapshots);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

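# Remove disks that were already allocated on the target node when a
# storage migration has to be aborted.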
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {

        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

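# Phase 1: set the 'migrate' lock in the VM config and sync offline disks.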
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

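# Phase 1 failed: drop the migrate lock again and report volumes that
# were already copied to the target node.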
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

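# Phase 2: start the VM on the target node in incoming-migration mode,
# set up the migration tunnel (and NBD mirroring for local disks), then
# drive the QEMU memory migration while polling 'query-migrate'.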
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }
    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{opts}->{targetstorage}) {
        push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
    }
    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

601 | die "unable to detect remote migration address\n" if !$raddr; | |
602 | ||
603 | $self->log('info', "start remote tunnel"); | |
604 | ||
605 | if ($migration_type eq 'secure') { | |
606 | ||
607 | if ($ruri =~ /^unix:/) { | |
608 | unlink $raddr; | |
609 | $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr"); | |
610 | $self->{tunnel}->{sock_addr} = $raddr; | |
611 | ||
612 | my $unix_socket_try = 0; # wait for the socket to become ready | |
613 | while (! -S $raddr) { | |
614 | $unix_socket_try++; | |
615 | if ($unix_socket_try > 100) { | |
616 | $self->{errors} = 1; | |
617 | $self->finish_tunnel($self->{tunnel}); | |
618 | die "Timeout, migration socket $ruri did not get ready"; | |
619 | } | |
620 | ||
621 | usleep(50000); | |
622 | } | |
623 | ||
624 | } elsif ($ruri =~ /^tcp:/) { | |
625 | my $tunnel_addr; | |
626 | if ($raddr eq "localhost") { | |
627 | # for backwards compatibility with older qemu-server versions | |
628 | my $pfamily = PVE::Tools::get_host_address_family($nodename); | |
629 | my $lport = PVE::Tools::next_migrate_port($pfamily); | |
630 | $tunnel_addr = "$lport:localhost:$rport"; | |
631 | } | |
632 | ||
633 | $self->{tunnel} = $self->fork_tunnel($tunnel_addr); | |
634 | ||
635 | } else { | |
636 | die "unsupported protocol in migration URI: $ruri\n"; | |
637 | } | |
638 | } else { | |
639 | #fork tunnel for insecure migration, to send faster commands like resume | |
640 | $self->{tunnel} = $self->fork_tunnel(); | |
641 | } | |
642 | ||
    my $start = time();

    if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1);
        }
    }

659 | $self->log('info', "starting online/live migration on $ruri"); | |
660 | $self->{livemigration} = 1; | |
661 | ||
662 | # load_defaults | |
663 | my $defaults = PVE::QemuServer::load_defaults(); | |
664 | ||
665 | # always set migrate speed (overwrite kvm default of 32m) | |
666 | # we set a very hight default of 8192m which is basically unlimited | |
667 | my $migrate_speed = $defaults->{migrate_speed} || 8192; | |
668 | $migrate_speed = $conf->{migrate_speed} || $migrate_speed; | |
669 | $migrate_speed = $migrate_speed * 1048576; | |
670 | $self->log('info', "migrate_set_speed: $migrate_speed"); | |
671 | eval { | |
672 | PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed)); | |
673 | }; | |
674 | $self->log('info', "migrate_set_speed error: $@") if $@; | |
675 | ||
676 | my $migrate_downtime = $defaults->{migrate_downtime}; | |
677 | $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime}); | |
678 | if (defined($migrate_downtime)) { | |
679 | $self->log('info', "migrate_set_downtime: $migrate_downtime"); | |
680 | eval { | |
681 | PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); | |
682 | }; | |
683 | $self->log('info', "migrate_set_downtime error: $@") if $@; | |
684 | } | |
685 | ||
686 | $self->log('info', "set migration_caps"); | |
687 | eval { | |
688 | PVE::QemuServer::set_migration_caps($vmid); | |
689 | }; | |
690 | warn $@ if $@; | |
691 | ||
692 | # set cachesize to 10% of the total memory | |
693 | my $memory = $conf->{memory} || $defaults->{memory}; | |
694 | my $cachesize = int($memory * 1048576 / 10); | |
695 | $self->log('info', "set cachesize: $cachesize"); | |
696 | eval { | |
697 | PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize)); | |
698 | }; | |
699 | $self->log('info', "migrate-set-cache-size error: $@") if $@; | |
700 | ||
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

721 | $self->log('info', "start migrate command to $ruri"); | |
722 | eval { | |
723 | PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri); | |
724 | }; | |
725 | my $merr = $@; | |
726 | $self->log('info', "migrate uri => $ruri failed: $merr") if $merr; | |
727 | ||
728 | my $lstat = 0; | |
729 | my $usleep = 1000000; | |
730 | my $i = 0; | |
731 | my $err_count = 0; | |
732 | my $lastrem = undef; | |
733 | my $downtimecounter = 0; | |
734 | while (1) { | |
735 | $i++; | |
736 | my $avglstat = $lstat/$i if $lstat; | |
737 | ||
738 | usleep($usleep); | |
739 | my $stat; | |
740 | eval { | |
741 | $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate"); | |
742 | }; | |
743 | if (my $err = $@) { | |
744 | $err_count++; | |
745 | warn "query migrate failed: $err\n"; | |
746 | $self->log('info', "query migrate failed: $err"); | |
747 | if ($err_count <= 5) { | |
748 | usleep(1000000); | |
749 | next; | |
750 | } | |
751 | die "too many query migrate failures - aborting\n"; | |
752 | } | |
753 | ||
754 | if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) { | |
755 | sleep(1); | |
756 | next; | |
757 | } | |
758 | ||
759 | if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) { | |
760 | $merr = undef; | |
761 | $err_count = 0; | |
762 | if ($stat->{status} eq 'completed') { | |
763 | my $delay = time() - $start; | |
764 | if ($delay > 0) { | |
765 | my $mbps = sprintf "%.2f", $memory / $delay; | |
766 | my $downtime = $stat->{downtime} || 0; | |
767 | $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms"); | |
768 | } | |
769 | } | |
770 | ||
771 | if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') { | |
772 | $self->log('info', "migration status error: $stat->{status}"); | |
773 | die "aborting\n" | |
774 | } | |
775 | ||
776 | if ($stat->{status} ne 'active') { | |
777 | $self->log('info', "migration status: $stat->{status}"); | |
778 | last; | |
779 | } | |
780 | ||
781 | if ($stat->{ram}->{transferred} ne $lstat) { | |
782 | my $trans = $stat->{ram}->{transferred} || 0; | |
783 | my $rem = $stat->{ram}->{remaining} || 0; | |
784 | my $total = $stat->{ram}->{total} || 0; | |
785 | my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0; | |
786 | my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0; | |
787 | my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0; | |
788 | my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0; | |
789 | my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0; | |
790 | # reduce sleep if remainig memory is lower than the average transfer speed | |
791 | $usleep = 100000 if $avglstat && $rem < $avglstat; | |
792 | ||
793 | $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " . | |
794 | "remaining ${rem}), total ${total})"); | |
795 | ||
796 | if (${xbzrlecachesize}) { | |
797 | $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}"); | |
798 | } | |
799 | ||
800 | if (($lastrem && $rem > $lastrem ) || ($rem == 0)) { | |
801 | $downtimecounter++; | |
802 | } | |
803 | $lastrem = $rem; | |
804 | ||
805 | if ($downtimecounter > 5) { | |
806 | $downtimecounter = 0; | |
807 | $migrate_downtime *= 2; | |
808 | $self->log('info', "migrate_set_downtime: $migrate_downtime"); | |
809 | eval { | |
810 | PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); | |
811 | }; | |
812 | $self->log('info', "migrate_set_downtime error: $@") if $@; | |
813 | } | |
814 | ||
815 | } | |
816 | ||
817 | ||
818 | $lstat = $stat->{ram}->{transferred}; | |
819 | ||
820 | } else { | |
821 | die $merr if $merr; | |
822 | die "unable to parse migration status '$stat->{status}' - aborting\n"; | |
823 | } | |
824 | } | |
825 | } | |
826 | ||
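# Phase 2 failed: cancel the migration and any block jobs, remove remote
# disk copies, stop the remote VM and close the tunnel.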
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {

        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

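# Phase 3: migration succeeded - remove the local copies of all
# offline-migrated volumes.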
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

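# Finish up: complete pending block jobs, move the VM config file to the
# target node, resume the VM there, wait for the SPICE client migration,
# stop the (now stale) local VM and deactivate/free its local volumes.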
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block-job
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }
    # transfer replication state before moving the config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

934 | if ($self->{livemigration}) { | |
935 | if ($self->{storage_migration}) { | |
936 | # stop nbd server on remote vm - requirement for resume since 2.9 | |
937 | my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid]; | |
938 | ||
939 | eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) }; | |
940 | if (my $err = $@) { | |
941 | $self->log('err', $err); | |
942 | $self->{errors} = 1; | |
943 | } | |
944 | } | |
945 | ||
946 | # config moved and nbd server stopped - now we can resume vm on target | |
947 | if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) { | |
948 | eval { | |
949 | $self->write_tunnel($tunnel, 30, "resume $vmid"); | |
950 | }; | |
951 | if (my $err = $@) { | |
952 | $self->log('err', $err); | |
953 | $self->{errors} = 1; | |
954 | } | |
955 | } else { | |
956 | my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck']; | |
957 | my $logf = sub { | |
958 | my $line = shift; | |
959 | $self->log('err', $line); | |
960 | }; | |
961 | eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); }; | |
962 | if (my $err = $@) { | |
963 | $self->log('err', $err); | |
964 | $self->{errors} = 1; | |
965 | } | |
966 | } | |
967 | } | |
968 | ||
969 | # close tunnel on successful migration, on error phase2_cleanup closed it | |
970 | if ($tunnel) { | |
971 | eval { finish_tunnel($self, $tunnel); }; | |
972 | if (my $err = $@) { | |
973 | $self->log('err', $err); | |
974 | $self->{errors} = 1; | |
975 | } | |
976 | } | |
977 | ||
    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoids LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

1;