package PVE::QemuMigrate;

use strict;
use warnings;

use PVE::AbstractMigrate;
use IO::File;
use IPC::Open2;
use POSIX qw(WNOHANG);
use PVE::INotify;
use PVE::Tools;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use Time::HiRes qw(usleep);
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Replication;

use base qw(PVE::AbstractMigrate);

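# Spawn a child process connected to us via a bidirectional pipe; used below
# to run the ssh migration tunnel. IPC::Open2::open2() forks internally, and
# if the exec fails the error is raised in the *child* - we detect that case
# by comparing PIDs and leave via POSIX::_exit() so the child skips any
# END/object destructor handlers inherited from the parent.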
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my $reader = IO::File->new();
    my $writer = IO::File->new();

    my $orig_pid = $$;

    my $cpid;

    eval { $cpid = open2($reader, $writer, @$cmd); };

    my $err = $@;

    # catch exec errors
    if ($orig_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$);
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}

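# Close both pipe ends and reap the tunnel child: wait up to $timeout seconds
# for it to exit on its own, then escalate to SIGTERM and finally SIGKILL.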
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    my $cpid = $cmdpipe->{pid};
    return if !defined($cpid);

    my $writer = $cmdpipe->{writer};
    my $reader = $cmdpipe->{reader};

    $writer->close();
    $reader->close();

    my $collect_child_process = sub {
        my $res = waitpid($cpid, WNOHANG);
        if (defined($res) && ($res == $cpid)) {
            delete $cmdpipe->{pid};
            return 1;
        } else {
            return 0;
        }
    };

    if ($timeout) {
        for (my $i = 0; $i < $timeout; $i++) {
            return if &$collect_child_process();
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    for (my $i = 0; $i < 10; $i++) {
        return if &$collect_child_process();
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill 9, $cpid;
    sleep 1;

    $self->log('err', "ssh tunnel child process (PID $cpid) couldn't be collected\n")
        if !&$collect_child_process();
}

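# Read a single line from the tunnel, guarded by a timeout (60s by default)
# so a hung ssh connection cannot block the migration worker forever.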
sub read_tunnel {
    my ($self, $tunnel, $timeout) = @_;

    $timeout = 60 if !defined($timeout);

    my $reader = $tunnel->{reader};

    my $output;
    eval {
        PVE::Tools::run_with_timeout($timeout, sub { $output = <$reader>; });
    };
    die "reading from tunnel failed: $@\n" if $@;

    chomp $output;

    return $output;
}

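# Send one command line over the tunnel. Since tunnel protocol version 1 the
# remote end acknowledges every command, so we wait for its reply and accept
# only 'OK'.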
sub write_tunnel {
    my ($self, $tunnel, $timeout, $command) = @_;

    $timeout = 60 if !defined($timeout);

    my $writer = $tunnel->{writer};

    eval {
        PVE::Tools::run_with_timeout($timeout, sub {
            print $writer "$command\n";
            $writer->flush();
        });
    };
    die "writing to tunnel failed: $@\n" if $@;

    if ($tunnel->{version} && $tunnel->{version} >= 1) {
        my $res = eval { $self->read_tunnel($tunnel, 10); };
        die "no reply to command '$command': $@\n" if $@;

        if ($res eq 'OK') {
            return;
        } else {
            die "tunnel replied '$res' to command '$command'\n";
        }
    }
}

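# Start the ssh tunnel to the target node (running 'qm mtunnel' there),
# optionally with a local port/socket forward (-L). The handshake expects a
# 'tunnel online' greeting followed by a 'ver <N>' protocol version line.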
sub fork_tunnel {
    my ($self, $tunnel_addr) = @_;

    my @localtunnelinfo = defined($tunnel_addr) ? ('-L', $tunnel_addr) : ();

    my $cmd = [@{$self->{rem_ssh}}, '-o ExitOnForwardFailure=yes', @localtunnelinfo, '/usr/sbin/qm', 'mtunnel'];

    my $tunnel = $self->fork_command_pipe($cmd);

    eval {
        my $helo = $self->read_tunnel($tunnel, 60);
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    my $err = $@;

    eval {
        my $ver = $self->read_tunnel($tunnel, 10);
        if ($ver =~ /^ver (\d+)$/) {
            $tunnel->{version} = $1;
            $self->log('info', "ssh tunnel $ver\n");
        } else {
            $err = "received invalid tunnel version string '$ver'\n" if !$err;
        }
    };

    if ($err) {
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }
    return $tunnel;
}

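# Ask the remote mtunnel to quit, tear down the ssh child process, and remove
# a leftover forwarded UNIX socket on both sides, since ssh does not clean
# those up itself.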
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval { $self->write_tunnel($tunnel, 30, 'quit'); };
    my $err = $@;

    $self->finish_command_pipe($tunnel, 30);

    if ($tunnel->{sock_addr}) {
        # ssh does not clean up on local host
        my $cmd = ['rm', '-f', $tunnel->{sock_addr}];
        PVE::Tools::run_command($cmd);

        # ... and just to be sure, check on the remote side
        unshift @{$cmd}, @{$self->{rem_ssh}};
        PVE::Tools::run_command($cmd);
    }

    die $err if $err;
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

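# Pre-flight checks before any data is touched: the VM config must exist and
# be unlocked, a running VM requires --online, all referenced storages must be
# available on both nodes, local (non-shared) volumes get activated, and the
# ssh connection to the target node is verified. Returns the VM's PID if it
# is running, 0 otherwise.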
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
    }

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
    if (scalar @$loc_res) {
        # note: use the local $running here - $self->{running} is not set yet
        if ($running || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);

    my $need_activate = [];
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $targetsid = $self->{opts}->{targetstorage} // $sid;

        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        } else {
            # only activate if not shared
            next if ($volid =~ m/vm-\d+-cloudinit/);
            push @$need_activate, $volid;
        }
    }

    # activate volumes
    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # test ssh connection
    my $cmd = [@{$self->{rem_ssh}}, '/bin/true'];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

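# Find all volumes on local (non-shared) storage and copy them to the target
# node. Volumes of a running VM are queued for online NBD mirroring in phase2
# instead, generated (cloudinit) volumes are only queued for deletion in
# phase3_cleanup, and replicated volumes are brought up to date via a final
# replication run rather than a full copy.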
sub sync_disks {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    $self->{volumes} = [];

    my $override_targetsid = $self->{opts}->{targetstorage};

    eval {

        # found local volumes and their origin
        my $local_volumes = {};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $sharedvm = 1;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = $override_targetsid // $storeid;

            # check if storage is available on target node
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
            $sharedvm = 0; # there is a non-shared disk

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';
            });
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            my $targetsid = $override_targetsid // $sid;
            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});

            return if $scfg->{shared};

            $sharedvm = 0;

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
            $local_volumes->{$volid}->{snapshots} = defined($snaprefs) || ($format =~ /^(?:qcow2|vmdk)$/);
            if (defined($snaprefs)) {
                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($self->{storecfg}, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found local disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without the 'with-local-disks' option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found local disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found local disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found local disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
                ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'\n";
            }
        }

        my $rep_cfg = PVE::ReplicationConfig->new();
        if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
            die "can't live migrate VM with replicated volumes\n" if $self->{running};
            $self->log('info', "replicating disk images");
            my $start_time = time();
            my $logfunc = sub { $self->log('info', shift) };
            $self->{replicated_volumes} = PVE::Replication::run_replication(
                'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
        }

        $self->log('info', "copying local disk images") if scalar(%$local_volumes);

        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $targetsid = $override_targetsid // $sid;
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                push @{$self->{online_local_volumes}}, $volid;
            } elsif ($ref eq 'generated') {
                die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n" if $self->{running};
                # skip all generated volumes but queue them for deletion in phase3_cleanup
                push @{$self->{volumes}}, $volid;
                next;
            } else {
                next if $self->{replicated_volumes}->{$volid};
                push @{$self->{volumes}}, $volid;
                my $opts = $self->{opts};
                my $insecure = $opts->{migration_type} eq 'insecure';
                my $with_snapshots = $local_volumes->{$volid}->{snapshots};
                # use 'migrate' limit for transfer to other node
                my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$targetsid, $sid], $opts->{bwlimit});
                # JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
                $bwlimit = $bwlimit * 1024 if defined($bwlimit);

                PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $targetsid,
                    undef, undef, undef, $bwlimit, $insecure, $with_snapshots);
            }
        }
    };
    die "Failed to sync data - $@" if $@;
}

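# Remove disks that were already copied to the target node via NBD; called
# when an aborted storage migration leaves half-mirrored volumes behind.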
sub cleanup_remotedisks {
    my ($self) = @_;

    foreach my $target_drive (keys %{$self->{target_drive}}) {
        my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

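# Phase 1: set the 'migrate' lock in the VM config and copy all local disks
# that can be transferred offline to the target node.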
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    sync_disks($self, $vmid);
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

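# Phase 2 (live migration only): start the VM paused on the target node via
# 'qm start --migratedfrom', parse the migration/NBD/spice endpoints that it
# reports on stdout, set up the ssh tunnel, mirror any online local disks
# over NBD, then drive the QEMU RAM migration, polling 'query-migrate' until
# it completes or fails.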
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{online_local_volumes}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        } elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "destination UNIX socket's VMID does not match source VMID\n" if $vmid ne $2;
            $ruri = "unix:$raddr";
        } elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        } elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        } elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $volid = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $self->{target_drive}->{$targetdrive}->{volid} = $volid;
            $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', $line);
    });

    die "unable to detect remote migration address\n" if !$raddr;

    $self->log('info', "start remote tunnel");

    if ($migration_type eq 'secure') {

        if ($ruri =~ /^unix:/) {
            unlink $raddr;
            $self->{tunnel} = $self->fork_tunnel("$raddr:$raddr");
            $self->{tunnel}->{sock_addr} = $raddr;

            my $unix_socket_try = 0; # wait for the socket to become ready
            while (! -S $raddr) {
                $unix_socket_try++;
                if ($unix_socket_try > 100) {
                    $self->{errors} = 1;
                    $self->finish_tunnel($self->{tunnel});
                    die "timeout - migration socket $ruri did not become ready\n";
                }

                usleep(50000);
            }
        } elsif ($ruri =~ /^tcp:/) {
            my $tunnel_addr;
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                $tunnel_addr = "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($tunnel_addr);
        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    } else {
        # fork tunnel for insecure migration too, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }

    my $start = time();

    my $opt_bwlimit = $self->{opts}->{bwlimit};

    if (defined($self->{online_local_volumes})) {
        $self->{storage_migration} = 1;
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});

        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};
            my $source_sid = PVE::Storage::Plugin::parse_volume_id($conf->{$drive});
            my $target_sid = PVE::Storage::Plugin::parse_volume_id($target->{volid});
            my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', [$source_sid, $target_sid], $opt_bwlimit);

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
        }
    }

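    # Tune the RAM migration before issuing the 'migrate' command below: the
    # bandwidth limit, the acceptable downtime, the migration capabilities and
    # the xbzrle cache size are all configured via monitor commands first.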
    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = PVE::Storage::get_bandwidth_limit('migrate', undef, $opt_bwlimit) // 0;
    my $migrate_speed = $conf->{migrate_speed} // 0;
    # migrate_speed is in MB/s, bwlimit in KB/s
    $migrate_speed *= 1024;

    if ($bwlimit && $migrate_speed) {
        $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
    } else {
        $migrate_speed ||= $bwlimit;
    }

    # always set migrate speed (overwrite kvm default of 32m) we set a very high
    # default of 8192m which is basically unlimited
    $migrate_speed ||= ($defaults->{migrate_speed} || 8192) * 1024;

    # qmp takes migrate_speed in B/s.
    $migrate_speed *= 1024;
    $self->log('info', "migrate_set_speed: $migrate_speed");
    eval {
        mon_cmd($vmid, "migrate_set_speed", value => int($migrate_speed));
    };
    $self->log('info', "migrate_set_speed error: $@") if $@;

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
            mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
    }

    $self->log('info', "set migration_caps");
    eval {
        PVE::QemuServer::set_migration_caps($vmid);
    };
    warn $@ if $@;

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "set cachesize: $cachesize");
    eval {
        mon_cmd($vmid, "migrate-set-cache-size", value => int($cachesize));
    };
    $self->log('info', "migrate-set-cache-size error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    $self->log('info', "start migrate command to $ruri");
    eval {
        mon_cmd($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $lstat = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $lstat ? $lstat / $i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = mon_cmd($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (defined($stat->{status}) && $stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $memory / $delay;
                    my $downtime = $stat->{downtime} || 0;
                    $self->log('info', "migration speed: $mbps MB/s - downtime $downtime ms");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                $self->log('info', "migration status error: $stat->{status}");
                die "aborting\n";
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # reduce sleep if remaining memory is lower than the average transfer speed
                $usleep = 100000 if $avglstat && $rem < $avglstat;

                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                    "remaining ${rem}, total ${total})");

                if (${xbzrlecachesize}) {
                    $self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
                }

                if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                    $downtimecounter++;
                }
                $lastrem = $rem;

                if ($downtimecounter > 5) {
                    $downtimecounter = 0;
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: $migrate_downtime");
                    eval {
                        mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
            }

            $lstat = $stat->{ram}->{transferred};
        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}

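# Phase 2 cleanup: cancel the RAM migration and any block-mirror jobs, remove
# half-copied disks on the target node, stop the remote VM again and close
# the tunnel. Only runs if phase 2 actually recorded errors.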
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }

        eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

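# Phase 3: the disk data has been transferred, so the stale local copies of
# all volumes queued in sync_disks can be deleted.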
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

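# Phase 3 cleanup: complete pending block-mirror jobs, transfer replication
# state, move the VM config file to the target node's directory (which hands
# the VM over in the clustered configuration filesystem), resume the VM
# there, then stop the local VM, deactivate its volumes and clear the
# migrate lock.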
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{storage_migration}) {
        # finish block-job
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };

        if (my $err = $@) {
            eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
            eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
            die "Failed to complete storage migration: $err\n";
        } else {
            foreach my $target_drive (keys %{$self->{target_drive}}) {
                my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
                $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        }
    }

    # transfer replication state before move config
    $self->transfer_replication_state() if $self->{replicated_volumes};

    # move config to remote node
    my $conffile = PVE::QemuConfig->config_file($vmid);
    my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->switch_replication_job_target() if $self->{replicated_volumes};

    if ($self->{livemigration}) {
        if ($self->{storage_migration}) {
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        # config moved and nbd server stopped - now we can resume vm on target
        if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
            eval {
                $self->write_tunnel($tunnel, 30, "resume $vmid");
            };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        } else {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
            my $logf = sub {
                my $line = shift;
                $self->log('err', $line);
            };
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        if ($self->{storage_migration} && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && $self->{running}) {
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel) {
        eval { finish_tunnel($self, $tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    if ($self->{storage_migration}) {
        # destroy local copies
        my $volids = $self->{online_local_volumes};

        foreach my $volid (@$volids) {
            eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
            if (my $err = $@) {
                $self->log('err', "removing local copy of '$volid' failed - $err");
                $self->{errors} = 1;
                last if $err =~ /^interrupted by signal$/;
            }
        }
    }

    # clear migrate lock
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'unlock', $vmid];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

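# Round up to the next power of two; used above to size the xbzrle cache.
# E.g. round_powerof2(3) == 4, round_powerof2(4) == 4, round_powerof2(5) == 8.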
sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0]-1)/log(2));
}

1;