pve-container.git / src/PVE/LXC/Migrate.pm
package PVE::LXC::Migrate;

use strict;
use warnings;
use PVE::AbstractMigrate;
use File::Basename;
use File::Copy; # fixme: remove
use PVE::Tools;
use PVE::INotify;
use PVE::Cluster;
use PVE::Storage;
use PVE::LXC;
use PVE::LXC::Config;

use base qw(PVE::AbstractMigrate);

sub lock_vm {
    my ($self, $vmid, $code, @param) = @_;

    return PVE::LXC::Config->lock_config($vmid, $code, @param);
}

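# prepare() runs on the source node before any data is moved: it loads and
# lock-checks the container config, refuses running containers (live migration
# is not implemented yet), verifies that the storage of every volume-backed
# mountpoint is available on both source and target node, activates the local
# (non-shared) volumes that need to be copied and finally tests the SSH
# connection to the target node. Returns whether the container is running.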
sub prepare {
    my ($self, $vmid) = @_;

    my $online = $self->{opts}->{online};

    $self->{storecfg} = PVE::Storage::config();

    # test if CT exists
    my $conf = $self->{vmconf} = PVE::LXC::Config->load_config($vmid);

    PVE::LXC::Config->check_lock($conf);

    my $running = 0;
    if (PVE::LXC::check_running($vmid)) {
        die "lxc live migration is currently not implemented\n";

        # note: the following checks are unreachable until live migration lands
        die "can't migrate running container without --online\n" if !$online;
        $running = 1;
    }

    my $force = $self->{opts}->{force} // 0;
    my $need_activate = [];

    PVE::LXC::Config->foreach_mountpoint($conf, sub {
        my ($ms, $mountpoint) = @_;

        my $volid = $mountpoint->{volume};

        # skip dev/bind mps when forced
        if ($mountpoint->{type} ne 'volume' && $force) {
            return;
        }
        my ($storage, $volname) = $volid ? PVE::Storage::parse_volume_id($volid, 1) : ();
        die "can't determine assigned storage for mountpoint '$ms'\n" if !$storage;

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $storage);
        PVE::Storage::storage_check_node($self->{storecfg}, $storage, $self->{node});

        if ($scfg->{shared}) {
            # PVE::Storage::activate_storage checks this for non-shared storages
            my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
            warn "Used shared storage '$storage' is not online on source node!\n"
                if !$plugin->check_connection($storage, $scfg);
        } else {
            # only activate if not shared
            push @$need_activate, $volid;

            die "unable to migrate local mountpoint '$volid' while CT is running"
                if $running;
        }

    });

    PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

    # todo: test if VM uses local resources

    # test ssh connection
    my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
    eval { $self->cmd_quiet($cmd); };
    die "Can't connect to destination address using public key\n" if $@;

    return $running;
}

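# phase1() performs the actual (offline) migration: set the 'migrate' lock,
# collect all local (non-shared) volumes referenced by the current config, by
# snapshots or found unused on enabled local storages, copy them to the target
# node via PVE::Storage::storage_migrate(), then unmount and deactivate all
# volumes and move the config file over to the target node.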
sub phase1 {
    my ($self, $vmid) = @_;

    $self->log('info', "starting migration of CT $self->{vmid} to node '$self->{node}' ($self->{nodeip})");

    my $conf = $self->{vmconf};
    $conf->{lock} = 'migrate';
    PVE::LXC::Config->write_config($vmid, $conf);

    if ($self->{running}) {
        $self->log('info', "container is running - using online migration");
    }

    $self->{volumes} = []; # list of already migrated volumes
    my $volhash = {}; # 'config', 'snapshot' or 'storage' for local volumes

    my $test_volid = sub {
        my ($volid, $snapname) = @_;

        return if !$volid;

        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

        # check if storage is available on both nodes
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
        PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});

        return if $scfg->{shared};

        $volhash->{$volid} = defined($snapname) ? 'snapshot' : 'config';

        my ($path, $owner) = PVE::Storage::path($self->{storecfg}, $volid);

        die "can't migrate volume '$volid' - owned by other guest (owner = $owner)\n"
            if !$owner || ($owner != $self->{vmid});

        if (defined($snapname)) {
            # we cannot migrate snapshots on local storage
            # exception: 'zfspool'
            if ($scfg->{type} eq 'zfspool') {
                return;
            }
            die "can't migrate snapshot of local volume '$volid'\n";
        }
    };

    my $test_mp = sub {
        my ($ms, $mountpoint, $snapname) = @_;

        my $volid = $mountpoint->{volume};
        # already checked in prepare
        if ($mountpoint->{type} ne 'volume') {
            $self->log('info', "ignoring mountpoint '$ms' ('$volid') of type " .
                "'$mountpoint->{type}', migration is forced.")
                if !$snapname;
            return;
        }

        my ($storage, $volname) = PVE::Storage::parse_volume_id($volid);
        my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $storage);

        if (!$scfg->{shared}) {
            $self->log('info', "copy mountpoint '$ms' ($volid) to node '$self->{node}'")
                if !$snapname;
        } else {
            $self->log('info', "mountpoint '$ms' is on shared storage '$storage'")
                if !$snapname;
        }
        &$test_volid($volid, $snapname);
    };

    # first unused / lost volumes owned by this container
    my @sids = PVE::Storage::storage_ids($self->{storecfg});
    foreach my $storeid (@sids) {
        my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
        next if $scfg->{shared};
        next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);

        # get list from PVE::Storage (for unused volumes)
        my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);

        next if @{$dl->{$storeid}} == 0;

        # check if storage is available on target node
        PVE::Storage::storage_check_node($self->{storecfg}, $storeid, $self->{node});

        PVE::Storage::foreach_volid($dl, sub {
            my ($volid, $sid, $volname) = @_;

            $volhash->{$volid} = 'storage';
        });
    }

    # then all volumes referenced in snapshots
    foreach my $snapname (keys %{$conf->{snapshots}}) {
        &$test_volid($conf->{snapshots}->{$snapname}->{'vmstate'}, $snapname)
            if defined($conf->{snapshots}->{$snapname}->{'vmstate'});
        PVE::LXC::Config->foreach_mountpoint($conf->{snapshots}->{$snapname}, $test_mp, $snapname);
    }

    # finally all currently used volumes
    PVE::LXC::Config->foreach_mountpoint($conf, $test_mp);

    # additional checks for local storage
    foreach my $volid (keys %$volhash) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
        my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);

        my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool') ||
            ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm');

        die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
            if !$migratable;

        # image is a linked clone on local storage, so we can't migrate.
        if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
            die "can't migrate '$volid' as it's a clone of '$basename'";
        }
    }

    foreach my $volid (sort keys %$volhash) {
        if ($volhash->{$volid} eq 'storage') {
            $self->log('info', "found local volume '$volid' (via storage)\n");
        } elsif ($volhash->{$volid} eq 'config') {
            $self->log('info', "found local volume '$volid' (in current VM config)\n");
        } elsif ($volhash->{$volid} eq 'snapshot') {
            $self->log('info', "found local volume '$volid' (referenced by snapshot(s))\n");
        } else {
            $self->log('info', "found local volume '$volid'\n");
        }
    }

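    # copy each collected local volume to the target node and remember it in
    # $self->{volumes} so phase3/phase1_cleanup know what has been transferred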
    foreach my $volid (keys %$volhash) {
        my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
        push @{$self->{volumes}}, $volid;
        PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
    }

    my $conffile = PVE::LXC::Config->config_file($vmid);
    my $newconffile = PVE::LXC::Config->config_file($vmid, $self->{node});

    if ($self->{running}) {
        die "implement me";
    }

    # make sure everything on (shared) storage is unmounted
    # Note: we must be 100% sure, else we get data corruption because
    # a non-shared file system could be mounted twice (on shared storage)

    PVE::LXC::umount_all($vmid, $self->{storecfg}, $conf);

    # to be sure there are no active volumes
    my $vollist = PVE::LXC::Config->get_vm_volumes($conf);
    PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);

    # move config
    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);

    $self->{conf_migrated} = 1;
}

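# phase1_cleanup() runs when phase1 fails: volume copies that already reached
# the target node are only reported here, they are not removed (see fixme).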
sub phase1_cleanup {
    my ($self, $vmid, $err) = @_;

    $self->log('info', "aborting phase 1 - cleanup resources");

    if ($self->{volumes}) {
        foreach my $volid (@{$self->{volumes}}) {
            $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'");
            # fixme: try to remove ?
        }
    }
}

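# phase3() runs after the migration succeeded and removes the local copies of
# all volumes that were transferred in phase1.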
sub phase3 {
    my ($self, $vmid) = @_;

    my $volids = $self->{volumes};

    # destroy local copies
    foreach my $volid (@$volids) {
        eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
        if (my $err = $@) {
            $self->log('err', "removing local copy of '$volid' failed - $err");
            $self->{errors} = 1;
            last if $err =~ /^interrupted by signal$/;
        }
    }
}

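# final_cleanup(): if the config file was never moved, just drop the local
# 'migrate' lock again; otherwise clear the lock on the target node via
# 'pct unlock'.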
sub final_cleanup {
    my ($self, $vmid) = @_;

    $self->log('info', "start final cleanup");

    if (!$self->{conf_migrated}) {
        my $conf = $self->{vmconf};
        delete $conf->{lock};

        eval { PVE::LXC::Config->write_config($vmid, $conf); };
        if (my $err = $@) {
            $self->log('err', $err);
        }
    } else {
        my $cmd = [ @{$self->{rem_ssh}}, 'pct', 'unlock', $vmid ];
        $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
    }
}

1;