package PVE::CLI::pveceph;

use strict;
use warnings;

use Fcntl ':flock';
use File::Path;
use IO::File;
use JSON;
use Data::Dumper;
use LWP::UserAgent;

use PVE::SafeSyslog;
use PVE::Cluster;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::Ceph::Tools;
use PVE::Ceph::Services;
use PVE::API2::Ceph;
use PVE::API2::Ceph::FS;
use PVE::API2::Ceph::MDS;
use PVE::API2::Ceph::MGR;
use PVE::API2::Ceph::MON;
use PVE::API2::Ceph::OSD;

use PVE::CLIHandler;

use base qw(PVE::CLIHandler);

my $nodename = PVE::INotify::nodename();

my $upid_exit = sub {
    my $upid = shift;
    my $status = PVE::Tools::upid_read_status($upid);
    exit(PVE::Tools::upid_status_is_error($status) ? -1 : 0);
};

sub setup_environment {
    PVE::RPCEnvironment->setup_default_cli_env();
}
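
# The 'purge' command destroys Ceph configuration and data directories on this
# node. It first gathers cluster state (pools, MON/MDS/OSD services) and
# refuses to proceed while pools still exist, while this node still runs an
# active OSD or MDS, or while it holds a MON that is not the last one left, so
# data is only destroyed after those were removed explicitly. With --logs and
# --crash it additionally wipes /var/log/ceph and /var/lib/ceph/crash.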
__PACKAGE__->register_method ({
    name => 'purge',
    path => 'purge',
    method => 'POST',
    description => "Destroy ceph related data and configuration files.",
    parameters => {
        additionalProperties => 0,
        properties => {
            logs => {
                description => 'Additionally purge Ceph logs, /var/log/ceph.',
                type => 'boolean',
                optional => 1,
            },
            crash => {
                description => 'Additionally purge Ceph crash logs, /var/lib/ceph/crash.',
                type => 'boolean',
                optional => 1,
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $message;
        my $pools = [];
        my $monstat = {};
        my $mdsstat = {};
        my $osdstat = [];

        eval {
            my $rados = PVE::RADOS->new();
            $pools = PVE::Ceph::Tools::ls_pools(undef, $rados);
            $monstat = PVE::Ceph::Services::get_services_info('mon', undef, $rados);
            $mdsstat = PVE::Ceph::Services::get_services_info('mds', undef, $rados);
            $osdstat = $rados->mon_command({ prefix => 'osd metadata' });
        };
        warn "Error gathering ceph info, already purged? Message: $@" if $@;

        my $osd = grep { $_->{hostname} eq $nodename } @$osdstat;
        my $mds = grep { $mdsstat->{$_}->{host} eq $nodename } keys %$mdsstat;
        my $mon = grep { $monstat->{$_}->{host} eq $nodename } keys %$monstat;

        # no pools = no data
        $message .= "- remove pools, this will !!DESTROY DATA!!\n" if @$pools;
        $message .= "- remove active OSD on $nodename\n" if $osd;
        $message .= "- remove active MDS on $nodename\n" if $mds;
        $message .= "- remove other MONs, $nodename is not the last MON\n"
            if scalar(keys %$monstat) > 1 && $mon;

        # display all steps at once
        die "Unable to purge Ceph!\n\nTo continue:\n$message" if $message;

        my $services = PVE::Ceph::Services::get_local_services();
        $services->{mon} = $monstat if $mon;
        $services->{crash}->{$nodename} = { direxists => 1 } if $param->{crash};
        $services->{logs}->{$nodename} = { direxists => 1 } if $param->{logs};

        PVE::Ceph::Tools::purge_all_ceph_services($services);
        PVE::Ceph::Tools::purge_all_ceph_files($services);

        return undef;
    }});
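
# 'install' writes the Proxmox Ceph repository for the selected release to
# /etc/apt/sources.list.d/ceph.list (main or test component) and installs the
# Ceph packages non-interactively via apt-get. The flag file returned by
# PVE::Ceph::Tools::ceph_install_flag_file() marks an installation as in
# progress and is removed once apt has finished, so other checks can tell a
# completed install apart from a partially extracted one.
# Example: pveceph install --version pacific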
__PACKAGE__->register_method ({
    name => 'install',
    path => 'install',
    method => 'POST',
    description => "Install ceph related packages.",
    parameters => {
        additionalProperties => 0,
        properties => {
            version => {
                type => 'string',
                enum => ['octopus', 'pacific'],
                default => 'pacific',
                description => "Ceph version to install.",
                optional => 1,
            },
            'allow-experimental' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Allow experimental versions. Use with care!",
            },
            'test-repository' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Use the test, not the main repository. Use with care!",
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $cephver = $param->{version} || 'pacific'; # NOTE: always change default here too!

        my $repo = $param->{'test-repository'} ? 'test' : 'main';

        my $repolist;
        if ($cephver eq 'octopus') {
            $repolist = "deb http://download.proxmox.com/debian/ceph-octopus bullseye $repo\n";
        } elsif ($cephver eq 'pacific') {
            $repolist = "deb http://download.proxmox.com/debian/ceph-pacific bullseye $repo\n";
        } else {
            die "unsupported ceph version: $cephver";
        }
        PVE::Tools::file_set_contents("/etc/apt/sources.list.d/ceph.list", $repolist);

        warn "WARNING: installing non-default ceph release '$cephver'!\n"
            if $cephver !~ qr/^(?:octopus|pacific)$/;

        local $ENV{DEBIAN_FRONTEND} = 'noninteractive';
        print "update available package list\n";
        eval {
            run_command(
                ['apt-get', '-q', 'update'],
                outfunc => sub {},
                errfunc => sub { print STDERR "$_[0]\n" },
            )
        };

        my @apt_install = qw(apt-get --no-install-recommends -o Dpkg::Options::=--force-confnew install --);
        my @ceph_packages = qw(
            ceph
            ceph-common
            ceph-mds
            ceph-fuse
            gdisk
            nvme-cli
        );

        print "start installation\n";

        # this flag helps to determine when apt is actually done installing (vs. partial extracting)
        my $install_flag_fn = PVE::Ceph::Tools::ceph_install_flag_file();
        open(my $install_flag, '>', $install_flag_fn) or die "could not create install flag - $!\n";
        close $install_flag;

        if (system(@apt_install, @ceph_packages) != 0) {
            unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";
            die "apt failed during ceph installation ($?)\n";
        }

        print "\ninstalled ceph $cephver successfully!\n";
        # done: drop flag file so that the PVE::Ceph::Tools check returns Ok now.
        unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";

        print "\nreloading API to load new Ceph RADOS library...\n";
        run_command([
            'systemctl', 'try-reload-or-restart', 'pvedaemon.service', 'pveproxy.service'
        ]);

        return undef;
    }});
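
# 'status' verifies that Ceph is initialized on this node and then simply
# shells out to `ceph -s`, streaming its output with a 15 second timeout.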
__PACKAGE__->register_method ({
    name => 'status',
    path => 'status',
    method => 'GET',
    description => "Get Ceph Status.",
    parameters => {
        additionalProperties => 0,
    },
    returns => { type => 'null' },
    code => sub {
        PVE::Ceph::Tools::check_ceph_inited();

        run_command(
            ['ceph', '-s'],
            outfunc => sub { print "$_[0]\n" },
            errfunc => sub { print STDERR "$_[0]\n" },
            timeout => 15,
        );
        return undef;
    }});
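
# Helper: collect all cephfs-type storage definitions belonging to the given
# filesystem, matching either an explicit 'fs-name' or, for entries without
# one, the cluster's default filesystem.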
my $get_storages = sub {
    my ($fs, $is_default) = @_;

    my $cfg = PVE::Storage::config();

    my $storages = $cfg->{ids};
    my $res = {};
    foreach my $storeid (keys %$storages) {
        my $curr = $storages->{$storeid};
        next if $curr->{type} ne 'cephfs';
        my $cur_fs = $curr->{'fs-name'};
        $res->{$storeid} = $storages->{$storeid}
            if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
    }

    return $res;
};
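
# 'fs destroy' removes a CephFS instance in a forked worker task
# ('cephdestroyfs'). With --remove-storages it first deletes matching,
# already-disabled pveceph-managed storage entries (external clusters are
# skipped); with --remove-pools it also drops the metadata and data pools once
# the filesystem itself has been destroyed.
# Example: pveceph fs destroy <name> --remove-storages 1 --remove-pools 1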
__PACKAGE__->register_method ({
    name => 'destroyfs',
    path => 'destroyfs',
    method => 'DELETE',
    description => "Destroy a Ceph filesystem",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            name => {
                description => "The ceph filesystem name.",
                type => 'string',
            },
            'remove-storages' => {
                description => "Remove all pveceph-managed storages configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
            'remove-pools' => {
                description => "Remove data and metadata pools configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $rpcenv = PVE::RPCEnvironment::get();
        my $user = $rpcenv->get_user();

        my $fs_name = $param->{name};

        my $fs;
        my $fs_list = PVE::Ceph::Tools::ls_fs();
        for my $entry (@$fs_list) {
            next if $entry->{name} ne $fs_name;
            $fs = $entry;
            last;
        }
        die "no such cephfs '$fs_name'\n" if !$fs;

        my $worker = sub {
            my $rados = PVE::RADOS->new();

            if ($param->{'remove-storages'}) {
                my $defaultfs;
                my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
                for my $fs ($fs_dump->{filesystems}->@*) {
                    next if $fs->{id} != $fs_dump->{default_fscid};
                    $defaultfs = $fs->{mdsmap}->{fs_name};
                }
                warn "no default fs found, maybe not all relevant storages are removed\n"
                    if !defined($defaultfs);

                my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
                for my $storeid (keys %$storages) {
                    my $store = $storages->{$storeid};
                    if (!$store->{disable}) {
                        die "storage '$storeid' is not disabled, make sure to disable ".
                            "and unmount the storage first\n";
                    }
                }

                my $err;
                for my $storeid (keys %$storages) {
                    # skip external clusters, not managed by pveceph
                    next if $storages->{$storeid}->{monhost};
                    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
                    if ($@) {
                        warn "failed to remove storage '$storeid': $@\n";
                        $err = 1;
                    }
                }
                die "failed to remove (some) storages - check log and remove manually!\n"
                    if $err;
            }

            PVE::Ceph::Tools::destroy_fs($fs_name, $rados);

            if ($param->{'remove-pools'}) {
                warn "removing metadata pool '$fs->{metadata_pool}'\n";
                eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
                warn "$@\n" if $@;

                foreach my $pool ($fs->{data_pools}->@*) {
                    warn "removing data pool '$pool'\n";
                    eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
                    warn "$@\n" if $@;
                }
            }

        };
        return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
    }});
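
# Command map for the pveceph CLI: nested groups (pool, fs, osd, mon, mgr, mds)
# plus legacy top-level aliases (lspools, createosd, ...). Entries that return
# a task UPID pass $upid_exit so the worker's result becomes the process exit
# status.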
our $cmddef = {
    init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
    pool => {
        ls => [ 'PVE::API2::Ceph::Pools', 'lspools', [], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema,
                [
                    'pool_name',
                    'size',
                    'min_size',
                    'pg_num',
                    'pg_num_min',
                    'pg_num_final',
                    'pg_autoscale_mode',
                    'target_size',
                    'target_size_ratio',
                    'crush_rule_name',
                    'percent_used',
                    'bytes_used',
                ],
                $options);
        }, $PVE::RESTHandler::standard_output_options],
        create => [ 'PVE::API2::Ceph::Pools', 'createpool', ['name'], { node => $nodename }],
        destroy => [ 'PVE::API2::Ceph::Pools', 'destroypool', ['name'], { node => $nodename } ],
        set => [ 'PVE::API2::Ceph::Pools', 'setpool', ['name'], { node => $nodename } ],
        get => [ 'PVE::API2::Ceph::Pools', 'getpool', ['name'], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema, undef, $options);
        }, $PVE::RESTHandler::standard_output_options],
    },
    lspools => { alias => 'pool ls' },
    createpool => { alias => 'pool create' },
    destroypool => { alias => 'pool destroy' },
    fs => {
        create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
        destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
    },
    osd => {
        create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::OSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
    },
    createosd => { alias => 'osd create' },
    destroyosd => { alias => 'osd destroy' },
    mon => {
        create => [ 'PVE::API2::Ceph::MON', 'createmon', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MON', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
    },
    createmon => { alias => 'mon create' },
    destroymon => { alias => 'mon destroy' },
    mgr => {
        create => [ 'PVE::API2::Ceph::MGR', 'createmgr', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MGR', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
    },
    createmgr => { alias => 'mgr create' },
    destroymgr => { alias => 'mgr destroy' },
    mds => {
        create => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['name'], { node => $nodename }, $upid_exit],
    },
    start => [ 'PVE::API2::Ceph', 'start', [], { node => $nodename }, $upid_exit],
    stop => [ 'PVE::API2::Ceph', 'stop', [], { node => $nodename }, $upid_exit],
    install => [ __PACKAGE__, 'install', [] ],
    purge => [ __PACKAGE__, 'purge', [] ],
    status => [ __PACKAGE__, 'status', []],
};

1;