package PVE::QemuMigrate;

use strict;
use warnings;

use IO::File;
use IPC::Open2;
use POSIX qw( WNOHANG );
use Time::HiRes qw( usleep );

use PVE::Format qw(render_bytes);
use PVE::Cluster;
use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Replication;
use PVE::ReplicationConfig;
use PVE::ReplicationState;
use PVE::Storage;
use PVE::Tools;
use PVE::Tunnel;

use PVE::QemuConfig;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer;

use PVE::AbstractMigrate;
use base qw(PVE::AbstractMigrate);

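# Fork an SSH tunnel to the target node by running 'qm mtunnel' there.
# $ssh_forward_info is a list of forwarding specs (see start_remote_tunnel()
# below for the "$lport:localhost:$rport" and "$sock:$sock" formats); the
# actual process handling lives in PVE::Tunnel::fork_ssh_tunnel.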
sub fork_tunnel {
    my ($self, $ssh_forward_info) = @_;

    my $cmd = ['/usr/sbin/qm', 'mtunnel'];
    my $log = sub {
        my ($level, $msg) = @_;
        $self->log($level, $msg);
    };

    return PVE::Tunnel::fork_ssh_tunnel($self->{rem_ssh}, $cmd, $ssh_forward_info, $log);
}

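# Set up the tunnel used to reach the migration endpoint on the target.
# For 'secure' migrations the state (and NBD) sockets are forwarded over
# SSH; for UNIX sockets we poll up to 100 times with 50 ms sleeps (~5 s
# total) until all forwarded sockets exist locally. For 'insecure'
# migrations a tunnel is still forked, but only to send commands like
# 'resume' faster.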
sub start_remote_tunnel {
    my ($self, $raddr, $rport, $ruri, $unix_socket_info) = @_;

    my $nodename = PVE::INotify::nodename();
    my $migration_type = $self->{opts}->{migration_type};

    if ($migration_type eq 'secure') {

        if ($ruri =~ /^unix:/) {
            my $ssh_forward_info = ["$raddr:$raddr"];
            $unix_socket_info->{$raddr} = 1;

            my $unix_sockets = [ keys %$unix_socket_info ];
            for my $sock (@$unix_sockets) {
                push @$ssh_forward_info, "$sock:$sock";
                unlink $sock;
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

            my $unix_socket_try = 0; # wait for the socket to become ready
            while ($unix_socket_try <= 100) {
                $unix_socket_try++;
                my $available = 0;
                foreach my $sock (@$unix_sockets) {
                    if (-S $sock) {
                        $available++;
                    }
                }

                if ($available == @$unix_sockets) {
                    last;
                }

                usleep(50000);
            }
            if ($unix_socket_try > 100) {
                $self->{errors} = 1;
                PVE::Tunnel::finish_tunnel($self->{tunnel});
                die "Timeout, migration socket $ruri did not get ready";
            }
            $self->{tunnel}->{unix_sockets} = $unix_sockets if (@$unix_sockets);

        } elsif ($ruri =~ /^tcp:/) {
            my $ssh_forward_info = [];
            if ($raddr eq "localhost") {
                # for backwards compatibility with older qemu-server versions
                my $pfamily = PVE::Tools::get_host_address_family($nodename);
                my $lport = PVE::Tools::next_migrate_port($pfamily);
                push @$ssh_forward_info, "$lport:localhost:$rport";
            }

            $self->{tunnel} = $self->fork_tunnel($ssh_forward_info);

        } else {
            die "unsupported protocol in migration URI: $ruri\n";
        }
    } else {
        # fork tunnel for insecure migration, to send faster commands like resume
        $self->{tunnel} = $self->fork_tunnel();
    }
}

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuConfig->lock_config($vmid, $code, @param);
}

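# Pre-flight checks before anything is changed: load and lock-check the VM
# config, verify replication constraints, check local resources and storage
# availability on both nodes, and test the SSH connection to the target.
# Returns the VM's PID if it is running, 0 otherwise.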
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    my $storecfg = $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuConfig->load_config($vmid);

    my $repl_conf = PVE::ReplicationConfig->new();
    $self->{replication_jobcfg} = $repl_conf->find_local_replication_job($vmid, $self->{node});
    $self->{is_replicated} = $repl_conf->check_for_existing_jobs($vmid, 1);

    if ($self->{replication_jobcfg} && defined($self->{replication_jobcfg}->{remove_job})) {
        die "refusing to migrate replicated VM whose replication job is marked for removal\n";
    }

    PVE::QemuConfig->check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "can't migrate running VM without --online\n" if !$online;
        $running = $pid;

        if ($self->{is_replicated} && !$self->{replication_jobcfg}) {
            if ($self->{opts}->{force}) {
                $self->log('warn', "WARNING: Node '$self->{node}' is not a replication target. Existing " .
                    "replication jobs will fail after migration!\n");
            } else {
                die "Cannot live-migrate replicated VM to node '$self->{node}' - not a replication " .
                    "target. Use 'force' to override.\n";
            }
        }

        $self->{forcemachine} = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);

        # To support custom CPU types, we keep QEMU's "-cpu" parameter intact.
        # Since the parameter itself contains no reference to a custom model,
        # this makes migration independent of changes to "cpu-models.conf".
        if ($conf->{cpu}) {
            my $cpuconf = PVE::JSONSchema::parse_property_string('pve-cpu-conf', $conf->{cpu});
            if ($cpuconf && PVE::QemuServer::CPUConfig::is_custom_model($cpuconf->{cputype})) {
                $self->{forcecpu} = PVE::QemuServer::CPUConfig::get_cpu_from_running_vm($pid);
            }
        }

        $self->{vm_was_paused} = 1 if PVE::QemuServer::vm_is_paused($vmid);
    }

    my $loc_res = PVE::QemuServer::check_local_resources($conf, 1);
    if (scalar @$loc_res) {
        if ($self->{running} || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices: " . join(", ", @$loc_res) . "\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    foreach my $volid (@$vollist) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

        my $targetsid = $sid;
        # NOTE: we currently ignore shared source storages in mappings so skip here too for now
        if (!$scfg->{shared}) {
            $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
        }

        my $target_scfg = PVE::Storage::storage_check_enabled($storecfg, $targetsid, $self->{node});
        my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);

        die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
            if !$target_scfg->{content}->{$vtype};

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$sid' is not online on source node!\n"
                if !$plugin->check_connection($sid, $scfg);
        }
    }

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

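# Scan all volumes of the VM and collect the local (non-shared) ones in
# $self->{local_volumes}. Each volume gets a 'ref' describing where it was
# found ('storage', 'config', 'snapshot' or 'generated'), plus its size,
# format, target storage, bandwidth limit and replication/vmstate flags.
# Anything that cannot be migrated (local CD-ROMs, foreign-owned volumes,
# linked clones, non-migratable snapshots) is collected as an error and
# aborts the migration.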
sub scan_local_volumes {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    # local volumes which have been copied
    # and their old_id => new_id pairs
    $self->{volume_map} = {};
    $self->{local_volumes} = {};

    my $storecfg = $self->{storecfg};
    eval {
        # find local volumes and their origin
        my $local_volumes = $self->{local_volumes};
        my $local_volumes_errors = {};
        my $other_errors = [];
        my $abort = 0;

        my $log_error = sub {
            my ($msg, $volid) = @_;

            if (defined($volid)) {
                $local_volumes_errors->{$volid} = $msg;
            } else {
                push @$other_errors, $msg;
            }
            $abort = 1;
        };

        my @sids = PVE::Storage::storage_ids($storecfg);
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid, undef, 'images');

            next if @{$dl->{$storeid}} == 0;

            my $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storeid);
            # check if storage is available on target node
            my $target_scfg = PVE::Storage::storage_check_enabled(
                $storecfg,
                $targetsid,
                $self->{node},
            );

            die "content type 'images' is not available on storage '$targetsid'\n"
                if !$target_scfg->{content}->{images};

            my $bwlimit = PVE::Storage::get_bandwidth_limit(
                'migration',
                [$targetsid, $storeid],
                $self->{opts}->{bwlimit},
            );

            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volinfo) = @_;

                $local_volumes->{$volid}->{ref} = 'storage';
                $local_volumes->{$volid}->{size} = $volinfo->{size};
                $local_volumes->{$volid}->{targetsid} = $targetsid;
                $local_volumes->{$volid}->{bwlimit} = $bwlimit;

                # If with_snapshots is not set for storage migrate, it tries to use
                # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
                # back to qcow2 is currently not possible.
                $local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
                $local_volumes->{$volid}->{format} = $volinfo->{format};
            });
        }

        my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
            : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
        foreach my $volid (keys %{$replicatable_volumes}) {
            $local_volumes->{$volid}->{replicated} = 1;
        }

        my $test_volid = sub {
            my ($volid, $attr) = @_;

            if ($volid =~ m|^/|) {
                return if $attr->{shared};
                $local_volumes->{$volid}->{ref} = 'config';
                die "local file/device\n";
            }

            my $snaprefs = $attr->{referenced_in_snapshot};

            if ($attr->{cdrom}) {
                if ($volid eq 'cdrom') {
                    my $msg = "can't migrate local cdrom drive";
                    if (defined($snaprefs) && !$attr->{referenced_in_config}) {
                        my $snapnames = join(', ', sort keys %$snaprefs);
                        $msg .= " (referenced in snapshot - $snapnames)";
                    }
                    &$log_error("$msg\n");
                    return;
                }
                return if $volid eq 'none';
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_enabled($storecfg, $sid);

            my $targetsid = $sid;
            # NOTE: we currently ignore shared source storages in mappings so skip here too for now
            if (!$scfg->{shared}) {
                $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
            }

            PVE::Storage::storage_check_enabled($storecfg, $targetsid, $self->{node});

            return if $scfg->{shared};

            $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
            $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused};
            $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};

            $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;

            $local_volumes->{$volid}->{drivename} = $attr->{drivename}
                if $attr->{drivename};

            if ($attr->{cdrom}) {
                if ($volid =~ /vm-\d+-cloudinit/) {
                    $local_volumes->{$volid}->{ref} = 'generated';
                    return;
                }
                die "local cdrom image\n";
            }

            my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

            die "owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $vmid);

            return if $attr->{is_vmstate};

            if (defined($snaprefs)) {
                $local_volumes->{$volid}->{snapshots} = 1;

                # we cannot migrate snapshots on local storage
                # exceptions: 'zfspool' or 'qcow2' files (on directory storage)

                die "online storage migration not possible if snapshot exists\n" if $self->{running};
                if (!($scfg->{type} eq 'zfspool'
                    || ($scfg->{type} eq 'btrfs' && $local_volumes->{$volid}->{format} eq 'raw')
                    || $local_volumes->{$volid}->{format} eq 'qcow2'
                )) {
                    die "non-migratable snapshot exists\n";
                }
            }

            die "referenced by linked clone(s)\n"
                if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
        };

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $attr) = @_;
            eval { $test_volid->($volid, $attr); };
            if (my $err = $@) {
                &$log_error($err, $volid);
            }
        });

        foreach my $vol (sort keys %$local_volumes) {
            my $type = $replicatable_volumes->{$vol} ? 'local, replicated' : 'local';
            my $ref = $local_volumes->{$vol}->{ref};
            if ($ref eq 'storage') {
                $self->log('info', "found $type disk '$vol' (via storage)\n");
            } elsif ($ref eq 'config') {
                &$log_error("can't live migrate attached local disks without with-local-disks option\n", $vol)
                    if $self->{running} && !$self->{opts}->{"with-local-disks"};
                $self->log('info', "found $type disk '$vol' (in current VM config)\n");
            } elsif ($ref eq 'snapshot') {
                $self->log('info', "found $type disk '$vol' (referenced by snapshot(s))\n");
            } elsif ($ref eq 'generated') {
                $self->log('info', "found generated disk '$vol' (in current VM config)\n");
            } else {
                $self->log('info', "found $type disk '$vol'\n");
            }
        }

        foreach my $vol (sort keys %$local_volumes_errors) {
            $self->log('warn', "can't migrate local disk '$vol': $local_volumes_errors->{$vol}");
        }
        foreach my $err (@$other_errors) {
            $self->log('warn', "$err");
        }

        if ($abort) {
            die "can't migrate VM - check log\n";
        }

        # additional checks for local storage
        foreach my $volid (keys %$local_volumes) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($storecfg, $sid);

            my $migratable = $scfg->{type} =~ /^(?:dir|btrfs|zfspool|lvmthin|lvm)$/;

            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if !$migratable;

            # image is a linked clone on local storage, so we can't migrate.
            if (my $basename = (PVE::Storage::parse_volname($storecfg, $volid))[3]) {
                die "can't migrate '$volid' as it's a clone of '$basename'";
            }
        }

        foreach my $volid (sort keys %$local_volumes) {
            my $ref = $local_volumes->{$volid}->{ref};
            if ($self->{running} && $ref eq 'config') {
                $local_volumes->{$volid}->{migration_mode} = 'online';
            } elsif ($self->{running} && $ref eq 'generated') {
                # offline migrate the cloud-init ISO and don't regenerate on VM start
                #
                # tpmstate will also be offline migrated first, and in case of
                # live migration then updated by QEMU/swtpm if necessary
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            } else {
                $local_volumes->{$volid}->{migration_mode} = 'offline';
            }
        }
    };
    die "Problem found while scanning volumes - $@" if $@;
}

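# Run a final replication pass before the disks are handed over. For live
# migrations, a QEMU block-dirty-bitmap is added per replicated drive
# first, so that writes happening during replication are tracked and only
# the delta needs to be mirrored in phase 2.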
sub handle_replication {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    return if !$self->{replication_jobcfg};
    if ($self->{running}) {

        my $version = PVE::QemuServer::kvm_user_version();
        if (!min_version($version, 4, 2)) {
            die "can't live migrate VM with replicated volumes, pve-qemu too old (< 4.2)!\n";
        }

        my @live_replicatable_volumes = $self->filter_local_volumes('online', 1);
        foreach my $volid (@live_replicatable_volumes) {
            my $drive = $local_volumes->{$volid}->{drivename};
            die "internal error - no drive for '$volid'\n" if !defined($drive);

            my $bitmap = "repl_$drive";

            # start tracking before replication to get full delta + a few duplicates
            $self->log('info', "$drive: start tracking writes using block-dirty-bitmap '$bitmap'");
            mon_cmd($vmid, 'block-dirty-bitmap-add', node => "drive-$drive", name => $bitmap);

            # other info comes from target node in phase 2
            $self->{target_drive}->{$drive}->{bitmap} = $bitmap;
        }
    }
    $self->log('info', "replicating disk images");

    my $start_time = time();
    my $logfunc = sub { $self->log('info', shift) };
    my $actual_replicated_volumes = PVE::Replication::run_replication(
        'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);

    # extra safety check
    my @replicated_volumes = $self->filter_local_volumes(undef, 1);
    foreach my $volid (@replicated_volumes) {
        die "expected volume '$volid' to get replicated, but it wasn't\n"
            if !$actual_replicated_volumes->{$volid};
    }
}

sub config_update_local_disksizes {
    my ($self) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};

    PVE::QemuConfig->foreach_volume($conf, sub {
        my ($key, $drive) = @_;
        # skip special disks, will be handled later
        return if $key eq 'efidisk0';
        return if $key eq 'tpmstate0';

        my $volid = $drive->{file};
        return if !defined($local_volumes->{$volid}); # only update sizes for local volumes

        my ($updated, $msg) = PVE::QemuServer::Drive::update_disksize($drive, $local_volumes->{$volid}->{size});
        if (defined($updated)) {
            $conf->{$key} = PVE::QemuServer::print_drive($updated);
            $self->log('info', "drive '$key': $msg");
        }
    });

    # we want to set the efidisk size in the config to the size of the
    # real OVMF_VARS.fd image, else we can create a too big image, which does not work
    if (defined($conf->{efidisk0})) {
        PVE::QemuServer::update_efidisk_size($conf);
    }

    # TPM state might have an irregular filesize, to avoid problems on transfer
    # we always assume the static size of 4M to allocate on the target
    if (defined($conf->{tpmstate0})) {
        PVE::QemuServer::update_tpmstate_size($conf);
    }
}

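# Return the volids from $self->{local_volumes} matching the given
# migration mode and/or replication flag; passing undef means "don't
# filter on this". E.g. $self->filter_local_volumes('offline', 0) yields
# all volumes that are copied offline and not replicated.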
sub filter_local_volumes {
    my ($self, $migration_mode, $replicated) = @_;

    my $volumes = $self->{local_volumes};
    my @filtered_volids;

    foreach my $volid (sort keys %{$volumes}) {
        next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
        next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
        push @filtered_volids, $volid;
    }

    return @filtered_volids;
}

sub sync_offline_local_volumes {
    my ($self) = @_;

    my $local_volumes = $self->{local_volumes};
    my @volids = $self->filter_local_volumes('offline', 0);

    my $storecfg = $self->{storecfg};
    my $opts = $self->{opts};

    $self->log('info', "copying local disk images") if scalar(@volids);

    foreach my $volid (@volids) {
        my $targetsid = $local_volumes->{$volid}->{targetsid};
        my $bwlimit = $local_volumes->{$volid}->{bwlimit};
        $bwlimit = $bwlimit * 1024 if defined($bwlimit); # storage_migrate uses bps

        my $storage_migrate_opts = {
            'ratelimit_bps' => $bwlimit,
            'insecure' => $opts->{migration_type} eq 'insecure',
            'with_snapshots' => $local_volumes->{$volid}->{snapshots},
            'allow_rename' => !$local_volumes->{$volid}->{is_vmstate},
        };

        my $logfunc = sub { $self->log('info', $_[0]); };
        my $new_volid = eval {
            PVE::Storage::storage_migrate($storecfg, $volid, $self->{ssh_info},
                                          $targetsid, $storage_migrate_opts, $logfunc);
        };
        if (my $err = $@) {
            die "storage migration for '$volid' to storage '$targetsid' failed - $err\n";
        }

        $self->{volume_map}->{$volid} = $new_volid;
        $self->log('info', "volume '$volid' is '$new_volid' on the target\n");

        eval { PVE::Storage::deactivate_volumes($storecfg, [$volid]); };
        if (my $err = $@) {
            $self->log('warn', $err);
        }
    }
}

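# Remove the disks already copied to the target node (via 'pvesm free'
# over SSH); replicated volumes are skipped, since they existed on the
# target before the migration started.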
sub cleanup_remotedisks {
    my ($self) = @_;

    my $local_volumes = $self->{local_volumes};

    foreach my $volid (values %{$self->{volume_map}}) {
        # don't clean up replicated disks!
        next if $local_volumes->{$volid}->{replicated};

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);

        my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];

        eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub cleanup_bitmaps {
    my ($self) = @_;
    foreach my $drive (keys %{$self->{target_drive}}) {
        my $bitmap = $self->{target_drive}->{$drive}->{bitmap};
        next if !$bitmap;
        $self->log('info', "$drive: removing block-dirty-bitmap '$bitmap'");
        mon_cmd($self->{vmid}, 'block-dirty-bitmap-remove', node => "drive-$drive", name => $bitmap);
    }
}

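# Phase 1: runs while the VM (if any) is still untouched on the source.
# Sets the 'migrate' lock, scans and fixes up local volumes, syncs the
# replication state and copies all offline local disks to the target.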
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file
    $conf->{lock} = 'migrate';
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->scan_local_volumes($vmid);

    # fix disk sizes to match their actual size and write changes,
    # so that the target allocates the correct volumes
    $self->config_update_local_disksizes();
    PVE::QemuConfig->write_config($vmid, $conf);

    $self->handle_replication($vmid);

    $self->sync_offline_local_volumes();
}

sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_remotedisks() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }
}

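# Phase 2: starts the VM on the target (via 'qm start --migratedfrom'),
# parses the addresses/ports the remote side prints on stdout, starts NBD
# mirroring for online local disks and then drives the actual QEMU live
# migration, polling 'query-migrate' and auto-increasing the downtime
# limit if the VM dirties memory faster than it can be transferred. The
# XBZRLE cache is sized to 10% of the VM memory, rounded up to a power of
# two (e.g. 4096 MiB memory -> ~410 MiB -> 512 MiB cache).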
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};
    my $local_volumes = $self->{local_volumes};
    my @online_local_volumes = $self->filter_local_volumes('online');

    $self->{storage_migration} = 1 if scalar(@online_local_volumes);

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $raddr;
    my $rport;
    my $ruri; # the whole migration dst. URI (protocol:address[:port])
    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}];

    my $spice_ticket;
    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
    }

    push @$cmd, 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;

    my $migration_type = $self->{opts}->{migration_type};

    push @$cmd, '--migration_type', $migration_type;

    push @$cmd, '--migration_network', $self->{opts}->{migration_network}
        if $self->{opts}->{migration_network};

    if ($migration_type eq 'insecure') {
        push @$cmd, '--stateuri', 'tcp';
    } else {
        push @$cmd, '--stateuri', 'unix';
    }

    if ($self->{forcemachine}) {
        push @$cmd, '--machine', $self->{forcemachine};
    }

    if ($self->{forcecpu}) {
        push @$cmd, '--force-cpu', $self->{forcecpu};
    }

    if ($self->{storage_migration}) {
        push @$cmd, '--targetstorage', ($self->{opts}->{targetstorage} // '1');
    }

    my $spice_port;
    my $unix_socket_info = {};
    # version > 0 for unix socket support
    my $nbd_protocol_version = 1;
    my $input = "nbd_protocol_version: $nbd_protocol_version\n";

    my @offline_local_volumes = $self->filter_local_volumes('offline');
    for my $volid (@offline_local_volumes) {
        my $drivename = $local_volumes->{$volid}->{drivename};
        next if !$drivename || !$conf->{$drivename};

        my $new_volid = $self->{volume_map}->{$volid};
        next if !$new_volid || $volid eq $new_volid;

        # FIXME PVE 8.x only use offline_volume variant once all targets can handle it
        if ($drivename eq 'tpmstate0') {
            $input .= "$drivename: $new_volid\n";
        } else {
            $input .= "offline_volume: $drivename: $new_volid\n";
        }
    }

    $input .= "spice_ticket: $spice_ticket\n" if $spice_ticket;

    my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
    foreach my $volid (@online_replicated_volumes) {
        $input .= "replicated_volume: $volid\n";
    }

    my $handle_storage_migration_listens = sub {
        my ($drive_key, $drivestr, $nbd_uri) = @_;

        $self->{stopnbd} = 1;
        $self->{target_drive}->{$drive_key}->{drivestr} = $drivestr;
        $self->{target_drive}->{$drive_key}->{nbd_uri} = $nbd_uri;

        my $source_drive = PVE::QemuServer::parse_drive($drive_key, $conf->{$drive_key});
        my $target_drive = PVE::QemuServer::parse_drive($drive_key, $drivestr);
        my $source_volid = $source_drive->{file};
        my $target_volid = $target_drive->{file};

        $self->{volume_map}->{$source_volid} = $target_volid;
        $self->log('info', "volume '$source_volid' is '$target_volid' on the target\n");
    };

    my $target_replicated_volumes = {};

    # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
    # instead we pipe it through STDIN
    my $exitcode = PVE::Tools::run_command($cmd, input => $input, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on tcp:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+)$/) {
            $raddr = $1;
            $rport = int($2);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
            $raddr = $1;
            die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
            $ruri = "unix:$raddr";
        }
        elsif ($line =~ m/^migration listens on port (\d+)$/) {
            $raddr = "localhost";
            $rport = int($1);
            $ruri = "tcp:$raddr:$rport";
        }
        elsif ($line =~ m/^spice listens on port (\d+)$/) {
            $spice_port = int($1);
        }
        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
            my $drivestr = $4;
            my $nbd_uri = "nbd:$1:$2:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
        } elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
            my $drivestr = $4;
            die "Destination UNIX socket's VMID does not match source VMID" if $vmid ne $2;
            my $nbd_unix_addr = $1;
            my $nbd_uri = "nbd:unix:$nbd_unix_addr:exportname=$3";
            my $targetdrive = $3;
            $targetdrive =~ s/drive-//g;

            $handle_storage_migration_listens->($targetdrive, $drivestr, $nbd_uri);
            $unix_socket_info->{$nbd_unix_addr} = 1;
        } elsif ($line =~ m/^re-using replicated volume: (\S+) - (.*)$/) {
            my $drive = $1;
            my $volid = $2;
            $target_replicated_volumes->{$volid} = $drive;
        } elsif ($line =~ m/^QEMU: (.*)$/) {
            $self->log('info', "[$self->{node}] $1\n");
        }
    }, errfunc => sub {
        my $line = shift;
        $self->log('info', "[$self->{node}] $line");
    }, noerr => 1);

    die "remote command failed with exit code $exitcode\n" if $exitcode;

    die "unable to detect remote migration address\n" if !$raddr;

    if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
        die "number of replicated disks on source and target node do not match - target node too old?\n";
    }

    $self->log('info', "start remote tunnel");
    $self->start_remote_tunnel($raddr, $rport, $ruri, $unix_socket_info);

    if ($self->{storage_migration}) {
        $self->{storage_migration_jobs} = {};
        $self->log('info', "starting storage migration");

        die "The number of local disks does not match between the source and the destination.\n"
            if (scalar(keys %{$self->{target_drive}}) != scalar(@online_local_volumes));
        foreach my $drive (keys %{$self->{target_drive}}) {
            my $target = $self->{target_drive}->{$drive};
            my $nbd_uri = $target->{nbd_uri};

            my $source_drive = PVE::QemuServer::parse_drive($drive, $conf->{$drive});
            my $source_volid = $source_drive->{file};

            my $bwlimit = $local_volumes->{$source_volid}->{bwlimit};
            my $bitmap = $target->{bitmap};

            $self->log('info', "$drive: start migration to $nbd_uri");
            PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit, $bitmap);
        }
    }

    $self->log('info', "starting online/live migration on $ruri");
    $self->{livemigration} = 1;

    # load_defaults
    my $defaults = PVE::QemuServer::load_defaults();

    $self->log('info', "set migration capabilities");
    eval { PVE::QemuServer::set_migration_caps($vmid) };
    warn $@ if $@;

    my $qemu_migrate_params = {};

    # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
    # migrate_speed parameter in qm.conf - take the lower of the two.
    my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', undef, $self->{opts}->{bwlimit}) // 0;
    my $migrate_speed = $conf->{migrate_speed} // 0;
    $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s

    if ($bwlimit && $migrate_speed) {
        $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
    } else {
        $migrate_speed ||= $bwlimit;
    }
    $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;

    if ($migrate_speed) {
        $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
        $self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");
    } else {
        # always set migrate speed as QEMU default to 128 MiBps == 1 Gbps, use 16 GiBps == 128 Gbps
        $migrate_speed = (16 << 30);
    }
    $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);

    my $migrate_downtime = $defaults->{migrate_downtime};
    $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
    # migrate-set-parameters expects limit in ms
    $migrate_downtime *= 1000;
    $self->log('info', "migration downtime limit: $migrate_downtime ms");
    $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);

    # set cachesize to 10% of the total memory
    my $memory = $conf->{memory} || $defaults->{memory};
    my $cachesize = int($memory * 1048576 / 10);
    $cachesize = round_powerof2($cachesize);

    $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
    $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);

    $self->log('info', "set migration parameters");
    eval {
        mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
    };
    $self->log('info', "migrate-set-parameters error: $@") if $@;

    if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});

        my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
        my $subject = PVE::AccessControl::read_x509_subject_spice($filename);

        $self->log('info', "spice client_migrate_info");

        eval {
            mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                    hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
                    'cert-subject' => $subject);
        };
        $self->log('info', "client_migrate_info error: $@") if $@;
    }

    my $start = time();

    $self->log('info', "start migrate command to $ruri");
    eval {
        mon_cmd($vmid, "migrate", uri => $ruri);
    };
    my $merr = $@;
    $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;

    my $last_mem_transferred = 0;
    my $usleep = 1000000;
    my $i = 0;
    my $err_count = 0;
    my $lastrem = undef;
    my $downtimecounter = 0;
    while (1) {
        $i++;
        my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;

        usleep($usleep);

        my $stat = eval { mon_cmd($vmid, "query-migrate") };
        if (my $err = $@) {
            $err_count++;
            warn "query migrate failed: $err\n";
            $self->log('info', "query migrate failed: $err");
            if ($err_count <= 5) {
                usleep(1_000_000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }

        my $status = $stat->{status};
        if (defined($status) && $status =~ m/^(setup)$/im) {
            sleep(1);
            next;
        }

        if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
            die $merr if $merr;
            die "unable to parse migration status '$status' - aborting\n";
        }
        $merr = undef;
        $err_count = 0;

        my $memstat = $stat->{ram};

        if ($status eq 'completed') {
            my $delay = time() - $start;
            if ($delay > 0) {
                my $total = $memstat->{total} || 0;
                my $avg_speed = render_bytes($total / $delay, 1);
                my $downtime = $stat->{downtime} || 0;
                $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
            }
        }

        if ($status eq 'failed' || $status eq 'cancelled') {
            $self->log('info', "migration status error: $status");
            die "aborting\n";
        }

        if ($status ne 'active') {
            $self->log('info', "migration status: $status");
            last;
        }

        if ($memstat->{transferred} ne $last_mem_transferred) {
            my $trans = $memstat->{transferred} || 0;
            my $rem = $memstat->{remaining} || 0;
            my $total = $memstat->{total} || 0;
            my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
            my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);

            # reduce sleep if remaining memory is lower than the average transfer speed
            $usleep = 100_000 if $avglstat && $rem < $avglstat;

            # also reduce logging if we poll more frequently
            my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;

            my $total_h = render_bytes($total, 1);
            my $transferred_h = render_bytes($trans, 1);
            my $speed_h = render_bytes($speed, 1);

            my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";

            if ($dirty_rate > $speed) {
                my $dirty_rate_h = render_bytes($dirty_rate, 1);
                $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
            }

            $self->log('info', "migration $status, $progress") if $should_log;

            my $xbzrle = $stat->{"xbzrle-cache"} || {};
            my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
            if ($xbzrlebytes || $xbzrlepages) {
                my $bytes_h = render_bytes($xbzrlebytes, 1);

                my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";

                $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
                    if $xbzrle->{'cache-miss-rate'};

                $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};

                $self->log('info', "xbzrle: $msg") if $should_log;
            }

            if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
                $downtimecounter++;
            }
            $lastrem = $rem;

            if ($downtimecounter > 5) {
                $downtimecounter = 0;
                $migrate_downtime *= 2;
                $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
                eval {
                    # migrate-set-parameters does not touch values not
                    # specified, so this only changes downtime-limit
                    mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
                };
                $self->log('info', "migrate-set-parameters error: $@") if $@;
            }
        }

        $last_mem_transferred = $memstat->{transferred};
    }

    if ($self->{storage_migration}) {
        # finish block-job with block-job-cancel, to disconnect source VM from NBD
        # to avoid it trying to re-establish it. We are in blockjob ready state,
        # thus, this command changes it to blockjob complete (see qapi docs)
        eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
        if (my $err = $@) {
            die "Failed to complete storage migration: $err\n";
        }
    }
}

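# Phase 2 cleanup: only runs when errors occurred. Cancels the migration
# and any block jobs, tries to resume the source VM if it ended up in the
# 'postmigrate' state, stops the half-started target VM and removes the
# disks already copied to it, then tears down the tunnel.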
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    $self->log('info', "migrate_cancel");
    eval {
        mon_cmd($vmid, "migrate_cancel");
    };
    $self->log('info', "migrate_cancel error: $@") if $@;

    my $vm_status = eval {
        mon_cmd($vmid, 'query-status')->{status} or die "no 'status' in result\n";
    };
    $self->log('err', "query-status error: $@") if $@;

    # Can end up in POSTMIGRATE state if failure occurred after convergence. Try going back to
    # original state. Unfortunately, direct transition from POSTMIGRATE to PAUSED is not possible.
    if ($vm_status && $vm_status eq 'postmigrate') {
        if (!$self->{vm_was_paused}) {
            eval { mon_cmd($vmid, 'cont'); };
            $self->log('err', "resuming VM failed: $@") if $@;
        } else {
            $self->log('err', "VM was paused, but ended in postmigrate state");
        }
    }

    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuConfig->write_config($vmid, $conf) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    # cleanup resources on target host
    if ($self->{storage_migration}) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    }

    eval { $self->cleanup_bitmaps() };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # cleanup after stopping, otherwise disks might be in-use by target VM!
    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
    if (my $err = $@) {
        $self->log('err', $err);
    }

    if ($self->{tunnel}) {
        eval { PVE::Tunnel::finish_tunnel($self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }
}

sub phase3 {
    my ($self, $vmid) = @_;

    return;
}

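# Phase 3 cleanup: runs after a successful migration. Updates the config
# with the new volume IDs, moves the config file to the target node,
# resumes the VM there, optionally runs fstrim on cloned disks, closes
# the tunnel, stops the local VM and frees the local disk copies.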
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    my $tunnel = $self->{tunnel};

    if ($self->{volume_map}) {
        my $target_drives = $self->{target_drive};

        # FIXME: for NBD storage migration we now only update the volid, and
        # not the full drivestr from the target node. Workaround that until we
        # got some real rescan, to avoid things like wrong format in the drive
        delete $conf->{$_} for keys %$target_drives;
        PVE::QemuConfig->update_volume_ids($conf, $self->{volume_map});

        for my $drive (keys %$target_drives) {
            $conf->{$drive} = $target_drives->{$drive}->{drivestr};
        }
        PVE::QemuConfig->write_config($vmid, $conf);
    }

    # transfer replication state before moving config
    $self->transfer_replication_state() if $self->{is_replicated};
    PVE::QemuConfig->move_config_to_node($vmid, $self->{node});
    $self->switch_replication_job_target() if $self->{is_replicated};

    if ($self->{livemigration}) {
        if ($self->{stopnbd}) {
            $self->log('info', "stopping NBD storage migration server on target.");
            # stop nbd server on remote vm - requirement for resume since 2.9
            my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];

            eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
            if (my $err = $@) {
                $self->log('err', $err);
                $self->{errors} = 1;
            }
        }

        if (!$self->{vm_was_paused}) {
            # config moved and nbd server stopped - now we can resume vm on target
            if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
                eval {
                    PVE::Tunnel::write_tunnel($tunnel, 30, "resume $vmid");
                };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            } else {
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
                my $logf = sub {
                    my $line = shift;
                    $self->log('err', $line);
                };
                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => $logf); };
                if (my $err = $@) {
                    $self->log('err', $err);
                    $self->{errors} = 1;
                }
            }
        }

        if (
            $self->{storage_migration}
            && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}
            && $self->{running}
        ) {
            if (!$self->{vm_was_paused}) {
                $self->log('info', "issuing guest fstrim");
                my $cmd = [@{$self->{rem_ssh}}, 'qm', 'guest', 'cmd', $vmid, 'fstrim'];
                eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
                if (my $err = $@) {
                    $self->log('err', "fstrim failed - $err");
                    $self->{errors} = 1;
                }
            } else {
                $self->log('info', "skipping guest fstrim, because VM is paused");
            }
        }
    }

    # close tunnel on successful migration, on error phase2_cleanup closed it
    if ($tunnel) {
        eval { PVE::Tunnel::finish_tunnel($tunnel); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    eval {
        my $timer = 0;
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
                my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer++;
                usleep(200000);
            }
        }
    };

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # always deactivate volumes - avoid LVM LVs being active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);

    # destroy local copies
    foreach my $volid (@not_replicated_volumes) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }

    # clear migrate lock
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}

sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}

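# Round up to the next power of two (used above for the XBZRLE cache
# size), e.g. round_powerof2(5) == 8, round_powerof2(8) == 8.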
sub round_powerof2 {
    return 1 if $_[0] < 2;
    return 2 << int(log($_[0]-1)/log(2));
}

1;