]>
Commit | Line | Data |
---|---|---|
3ea94c60 | 1 | package PVE::QemuMigrate; |
1ef75254 | 2 | |
1e3baf05 | 3 | use strict; |
3ea94c60 | 4 | use warnings; |
16e903f2 | 5 | use PVE::AbstractMigrate; |
3ea94c60 | 6 | use IO::File; |
1e3baf05 | 7 | use IPC::Open2; |
3ea94c60 DM |
8 | use PVE::INotify; |
9 | use PVE::Cluster; | |
1e3baf05 | 10 | use PVE::Storage; |
3ea94c60 | 11 | use PVE::QemuServer; |
e52bd94c | 12 | use Time::HiRes qw( usleep ); |
1e3baf05 | 13 | |
16e903f2 | 14 | use base qw(PVE::AbstractMigrate); |
1e3baf05 | 15 | |
# Spawn $cmd (an array ref) connected via a bidirectional pipe.
# Returns a hash ref with 'reader', 'writer' (IO::File handles) and the
# child 'pid'. Dies when open2 fails in the parent.
sub fork_command_pipe {
    my ($self, $cmd) = @_;

    my ($reader, $writer) = (IO::File->new(), IO::File->new());

    # remember who we are - open2 failures after fork() surface in the child
    my $parent_pid = $$;

    my $cpid;
    eval { $cpid = open2($reader, $writer, @$cmd); };
    my $err = $@;

    # catch exec errors: if $$ changed we are the forked child whose exec failed
    if ($parent_pid != $$) {
        $self->log('err', "can't fork command pipe\n");
        POSIX::_exit(1);
        kill('KILL', $$); # safety net in case _exit is ever trapped
    }

    die $err if $err;

    return { writer => $writer, reader => $reader, pid => $cpid };
}
41 | ||
# Close both ends of a command pipe created by fork_command_pipe() and
# make sure the child process really goes away. Optionally waits up to
# $timeout seconds for a voluntary exit, then escalates SIGTERM -> SIGKILL.
sub finish_command_pipe {
    my ($self, $cmdpipe, $timeout) = @_;

    # closing the handles gives the child EOF so it can exit on its own
    $cmdpipe->{writer}->close();
    $cmdpipe->{reader}->close();

    my $cpid = $cmdpipe->{pid};

    if ($timeout) {
        foreach my $sec (1 .. $timeout) {
            return if !PVE::ProcFSTools::check_process_running($cpid);
            sleep(1);
        }
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGTERM\n");
    kill(15, $cpid);

    # wait again
    foreach my $sec (1 .. 10) {
        return if !PVE::ProcFSTools::check_process_running($cpid);
        sleep(1);
    }

    $self->log('info', "ssh tunnel still running - terminating now with SIGKILL\n");
    kill(9, $cpid);
    sleep(1);
}
73 | ||
# Open an ssh tunnel to $nodeip that forwards local port $lport to remote
# port $rport and runs 'qm mtunnel' on the far side. Waits (max 60s) for
# the remote handshake and dies if it does not report "tunnel online".
sub fork_tunnel {
    my ($self, $nodeip, $lport, $rport) = @_;

    my $cmd = [@{$self->{rem_ssh}}, '-L', "$lport:localhost:$rport",
               'qm', 'mtunnel' ];

    my $tunnel = $self->fork_command_pipe($cmd);

    my $helo;
    eval {
        my $reader = $tunnel->{reader};
        PVE::Tools::run_with_timeout(60, sub { $helo = <$reader>; });
        die "no reply\n" if !$helo;
        die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
        die "got strange reply from mtunnel ('$helo')\n"
            if $helo !~ m/^tunnel online$/;
    };
    if (my $err = $@) {
        # tear the half-open tunnel down before propagating the error
        $self->finish_command_pipe($tunnel);
        die "can't open migration tunnel - $err";
    }

    return $tunnel;
}
100 | ||
# Ask the remote 'qm mtunnel' to quit gracefully (30s budget for the
# write), then force the pipe closed. Re-raises any write/timeout error
# only after the pipe has been cleaned up.
sub finish_tunnel {
    my ($self, $tunnel) = @_;

    eval {
        PVE::Tools::run_with_timeout(30, sub {
            my $writer = $tunnel->{writer};
            print $writer "quit\n";
            $writer->flush();
        });
    };
    my $err = $@;

    # always tear down the pipe, even when the graceful quit failed
    $self->finish_command_pipe($tunnel, 30);

    die $err if $err;
}
118 | ||
# Run $code (with @param) while holding the config lock for $vmid.
# Hook required by the PVE::AbstractMigrate base class; delegates to the
# qemu-server config-level lock helper.
sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::QemuServer::lock_config($vmid, $code, @param);
}
ff1a2432 | 124 | |
# Validate all preconditions on the source node before migration starts.
# Returns the VM's pid when it is running (0 otherwise); the framework
# stores that return value as $self->{running}. Dies when migration is
# not possible (locked config, running VM without --online, local
# devices, unreachable target).
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if VM exists
    my $conf = $self->{vmconf} = PVE::QemuServer::load_config($vmid);

    # refuse when another operation already holds a lock on the config
    PVE::QemuServer::check_lock($conf);

    my $running = 0;
    if (my $pid = PVE::QemuServer::check_running($vmid)) {
        die "cant migrate running VM without --online\n" if !$online;
        $running = $pid;
    }

    if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
        # fix: this previously tested $self->{running}, which is not set
        # yet at this point (the framework assigns it from our return
        # value) - use the local $running instead
        if ($running || !$self->{opts}->{force}) {
            die "can't migrate VM which uses local devices\n";
        } else {
            $self->log('info', "migrating VM which uses local devices");
        }
    }

    # activate volumes
    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    # fixme: check if storage is available on both nodes

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}
164 | ||
# Copy all local (non-shared) disk images of $vmid to the target node.
# Candidate volumes are collected from two sources: the storage content
# listings (catches unused/orphaned disks owned by this VM) and the VM
# configuration itself. Dies when a volume cannot be migrated (local
# file/device, local cdrom, foreign owner, unsupported storage type, or
# an online migration that would need to move local disks).
sub sync_disks {
    my ($self, $vmid) = @_;

    $self->log('info', "copying disk images");

    my $conf = $self->{vmconf};

    # volumes pushed here were copied to the target; phase3 removes the
    # local copies, phase1_cleanup reports them as stale on abort
    $self->{volumes} = [];

    eval {

        my $volhash = {};    # set of volids that must be migrated
        my $cdromhash = {};  # set of cdrom volids (never migratable if local)

        # stays 1 while every disk lives on shared storage
        my $sharedvm = 1;

        my @sids = PVE::Storage::storage_ids($self->{storecfg});
        foreach my $storeid (@sids) {
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            next if $scfg->{shared};
            next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

            # get list from PVE::Storage (for unused volumes)
            my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
            PVE::Storage::foreach_volid($dl, sub {
                my ($volid, $sid, $volname) = @_;

                # check if storage is available on target node
                PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

                $volhash->{$volid} = 1;
                $sharedvm = 0; # there is a non-shared disk
            });
        }

        # and add used, owned/non-shared disks (just to be sure we have all)

        PVE::QemuServer::foreach_volid($conf, sub {
            my ($volid, $is_cdrom) = @_;

            return if !$volid;

            die "cant migrate local file/device '$volid'\n" if $volid =~ m|^/|;

            if ($is_cdrom) {
                die "cant migrate local cdrom drive\n" if $volid eq 'cdrom';
                return if $volid eq 'none';
                $cdromhash->{$volid} = 1;
            }

            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

            # check if storage is available on both nodes
            my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
            PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

            return if $scfg->{shared};

            die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

            $sharedvm = 0;

            my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

            die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
                if !$owner || ($owner != $self->{vmid});

            $volhash->{$volid} = 1;
        });

        # online migration cannot move local disk content along
        if ($self->{running} && !$sharedvm) {
            die "can't do online migration - VM uses local disks\n";
        }

        # do some checks first
        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

            # fix: message previously read "storagy type"
            die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
                if $scfg->{type} ne 'dir';
        }

        foreach my $volid (keys %$volhash) {
            my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
            push @{$self->{volumes}}, $volid;
            PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
        }
    };
    die "Failed to sync data - $@" if $@;
}
258 | ||
# Phase 1: lock the VM config for migration, then copy all local disk
# images to the target node.
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of VM $vmid to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};

    # set migrate lock in config file so nobody else touches the VM
    $conf->{lock} = 'migrate';
    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);

    sync_disks($self, $vmid);
}
273 | ||
# Phase 1 abort handler: release the migrate lock (best effort) and
# report any volume copies already created on the target node.
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    # drop the 'migrate' lock set in phase1; log (but don't die) on failure
    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    $self->log('err', $@) if $@;

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}
293 | ||
# Phase 2 (live migration): start the target VM in incoming-migration
# mode, open the ssh tunnel, kick off the QEMU migration and poll
# 'query-migrate' until it completes, fails or is cancelled.
sub phase2 {
    my ($self, $vmid) = @_;

    my $conf = $self->{vmconf};

    $self->log('info', "starting VM $vmid on remote node '$self->{node}'");

    my $rport;

    my $nodename = PVE::INotify::nodename();

    ## start on remote node
    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'start',
               $vmid, '--stateuri', 'tcp', '--skiplock', '--migratedfrom', $nodename];

    # scrape the migration port that the remote 'qm start' prints
    PVE::Tools::run_command($cmd, outfunc => sub {
        my $line = shift;

        if ($line =~ m/^migration listens on port (\d+)$/) {
            $rport = $1;
        }
    }, errfunc => sub {});

    die "unable to detect remote migration port\n" if !$rport;

    $self->log('info', "starting migration tunnel");

    ## create tunnel to remote port
    my $lport = PVE::QemuServer::next_migrate_port();
    $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);

    $self->log('info', "starting online/live migration on port $lport");
    # start migration

    my $start = time();

    # try to enable xbzrle delta compression; best effort - older qemu
    # versions do not know this command, hence the bare eval
    my $capabilities = {};
    $capabilities->{capability} = "xbzrle";
    $capabilities->{state} = JSON::false;

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-capabilities", capabilities => [$capabilities]);
    };

    # set cachesize to 10% of the total memory (best effort, see above)
    my $cachesize = int($conf->{memory}*1048576/10);
    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => $cachesize);
    };

    eval {
        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:localhost:$lport");
    };
    # a 'migrate' error may still mean the migration finished - only
    # raised below if the status query cannot confirm progress
    my $merr = $@;

    my $lstat = 0;        # bytes transferred as of the previous iteration
    my $usleep = 2000000; # poll interval in microseconds
    my $i = 0;
    my $err_count = 0;
    while (1) {
        $i++;
        # fix: was "my $avglstat = $lstat/$i if $lstat;" - 'my' combined
        # with a statement modifier has undefined behaviour in Perl
        my $avglstat = $lstat ? $lstat/$i : 0;

        usleep($usleep);
        my $stat;
        eval {
            $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
        };
        if (my $err = $@) {
            # tolerate up to 5 consecutive monitor hiccups before giving up
            $err_count++;
            warn "query migrate failed: $err\n";
            if ($err_count <= 5) {
                usleep(1000000);
                next;
            }
            die "too many query migrate failures - aborting\n";
        }
        if ($stat->{status} =~ m/^(active|completed|failed|cancelled)$/im) {
            $merr = undef;
            $err_count = 0;
            if ($stat->{status} eq 'completed') {
                my $delay = time() - $start;
                if ($delay > 0) {
                    my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
                    $self->log('info', "migration speed: $mbps MB/s");
                }
            }

            if ($stat->{status} eq 'failed' || $stat->{status} eq 'cancelled') {
                die "aborting\n"
            }

            if ($stat->{status} ne 'active') {
                $self->log('info', "migration status: $stat->{status}");
                last;
            }

            if ($stat->{ram}->{transferred} ne $lstat) {
                my $trans = $stat->{ram}->{transferred} || 0;
                my $rem = $stat->{ram}->{remaining} || 0;
                my $total = $stat->{ram}->{total} || 0;
                my $xbzrlecachesize = $stat->{"xbzrle-cache"}->{"cache-size"} || 0;
                my $xbzrlebytes = $stat->{"xbzrle-cache"}->{"bytes"} || 0;
                my $xbzrlepages = $stat->{"xbzrle-cache"}->{"pages"} || 0;
                my $xbzrlecachemiss = $stat->{"xbzrle-cache"}->{"cache-miss"} || 0;
                my $xbzrleoverflow = $stat->{"xbzrle-cache"}->{"overflow"} || 0;
                # poll faster once the remaining memory drops below the
                # average amount transferred per iteration
                $usleep = 300000 if $avglstat && $rem < $avglstat;

                # fix: message previously had unbalanced parentheses
                $self->log('info', "migration status: $stat->{status} (transferred ${trans}, " .
                           "remaining ${rem}, total ${total})");

                #$self->log('info', "migration xbzrle cachesize: ${xbzrlecachesize} transferred ${xbzrlebytes} pages ${xbzrlepages} cachemiss ${xbzrlecachemiss} overflow ${xbzrleoverflow}");
            }

            $lstat = $stat->{ram}->{transferred};

        } else {
            die $merr if $merr;
            die "unable to parse migration status '$stat->{status}' - aborting\n";
        }
    }
}
16e903f2 | 417 | |
# Phase 2 abort handler: only acts when phase2 flagged an error. Drops
# the local migrate lock and stops the partially started VM on the
# target node. Sets $self->{phase2errors} so phase3/phase3_cleanup skip
# the hand-over steps.
sub phase2_cleanup {
    my ($self, $vmid, $err) = @_;

    return if !$self->{errors};
    $self->{phase2errors} = 1;

    $self->log('info', "aborting phase 2 - cleanup resources");

    # remove the migrate lock from the local config (best effort)
    my $conf = $self->{vmconf};
    delete $conf->{lock};
    eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) };
    $self->log('err', $@) if $@;

    # cleanup ressources on target host
    my $nodename = PVE::INotify::nodename();

    my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
    eval { PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
    if (my $stoperr = $@) {
        $self->log('err', $stoperr);
        $self->{errors} = 1;
    }
}
443 | ||
# Phase 3: remove the local copies of all disks that were migrated in
# sync_disks(). Skipped entirely when phase2 failed.
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};
    return if $self->{phase2errors};

    # destroy local copies; keep going on individual failures, but stop
    # early when interrupted by a signal
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}
460 | ||
# Phase 3 finalization. The statement order here matters:
#   1. move the config file to the target node (the actual ownership
#      hand-over - after this, the target node owns the VM),
#   2. resume the VM on the target (live migration only),
#   3. stop the now-stale local instance,
#   4. close the migration tunnel,
#   5. deactivate local volumes (avoid lvm LVs active on several nodes),
#   6. clear the migrate lock on the target.
# Skipped entirely when phase2 failed.
sub phase3_cleanup {
    my ($self, $vmid, $err) = @_;

    my $conf = $self->{vmconf};
    return if $self->{phase2errors};

    # move config to remote node
    my $conffile = PVE::QemuServer::config_file($vmid);
    my $newconffile = PVE::QemuServer::config_file($vmid, $self->{node});

    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    # now that the config file is moved, we can resume the vm on the
    # target if this was a live migration (tunnel present)
    if ($self->{tunnel}) {
        my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
        eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always stop local VM
    eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
    if (my $err = $@) {
        $self->log('err', "stopping vm failed - $err");
        $self->{errors} = 1;
    }

    # close the tunnel (30s grace before escalating signals)
    if ($self->{tunnel}) {
        eval { finish_tunnel($self, $self->{tunnel}); };
        if (my $err = $@) {
            $self->log('err', $err);
            $self->{errors} = 1;
        }
    }

    # always deactivate volumes - avoid lvm LVs to be active on several nodes
    eval {
        my $vollist = PVE::QemuServer::get_vm_volumes($conf);
        PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
    };
    if (my $err = $@) {
        $self->log('err', $err);
        $self->{errors} = 1;
    }

    # clear migrate lock (on the target node, which now owns the config)
    my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
    $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
513 | ||
# Hook required by PVE::AbstractMigrate, called once at the very end of
# the migration workflow; no state is left to clean up here.
sub final_cleanup {
    my ($self, $vmid) = @_;

    # nothing to do
}
519 | ||
520 | 1; |