package PVE::CLI::pveceph;

use strict;
use warnings;

use Fcntl ':flock';
use File::Path;
use IO::File;
use JSON;
use Data::Dumper;
use LWP::UserAgent;

use PVE::SafeSyslog;
use PVE::Cluster;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::Ceph::Tools;
use PVE::Ceph::Services;
use PVE::API2::Ceph;
use PVE::API2::Ceph::FS;
use PVE::API2::Ceph::MDS;
use PVE::API2::Ceph::MGR;
use PVE::API2::Ceph::MON;
use PVE::API2::Ceph::OSD;

use PVE::CLIHandler;

use base qw(PVE::CLIHandler);

my $nodename = PVE::INotify::nodename();

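# Exit handler for task UPIDs: propagate a failed task as a non-zero exit code.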
my $upid_exit = sub {
    my $upid = shift;
    my $status = PVE::Tools::upid_read_status($upid);
    exit(PVE::Tools::upid_status_is_error($status) ? -1 : 0);
};

sub setup_environment {
    PVE::RPCEnvironment->setup_default_cli_env();
}

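# Refuse to purge while this node still has pools, an active OSD or MDS, or is not the
# last remaining MON; only then remove the local Ceph services, files and, optionally, logs.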
__PACKAGE__->register_method ({
    name => 'purge',
    path => 'purge',
    method => 'POST',
    description => "Destroy ceph related data and configuration files.",
    parameters => {
        additionalProperties => 0,
        properties => {
            logs => {
                description => 'Additionally purge Ceph logs, /var/log/ceph.',
                type => 'boolean',
                optional => 1,
            },
            crash => {
                description => 'Additionally purge Ceph crash logs, /var/lib/ceph/crash.',
                type => 'boolean',
                optional => 1,
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $message;
        my $pools = [];
        my $monstat = {};
        my $mdsstat = {};
        my $osdstat = [];

        eval {
            my $rados = PVE::RADOS->new();
            $pools = PVE::Ceph::Tools::ls_pools(undef, $rados);
            $monstat = PVE::Ceph::Services::get_services_info('mon', undef, $rados);
            $mdsstat = PVE::Ceph::Services::get_services_info('mds', undef, $rados);
            $osdstat = $rados->mon_command({ prefix => 'osd metadata' });
        };
        warn "Error gathering ceph info, already purged? Message: $@" if $@;

        my $osd = grep { $_->{hostname} eq $nodename } @$osdstat;
        my $mds = grep { $mdsstat->{$_}->{host} eq $nodename } keys %$mdsstat;
        my $mon = grep { $monstat->{$_}->{host} eq $nodename } keys %$monstat;

        # no pools = no data
        $message .= "- remove pools, this will !!DESTROY DATA!!\n" if @$pools;
        $message .= "- remove active OSD on $nodename\n" if $osd;
        $message .= "- remove active MDS on $nodename\n" if $mds;
        $message .= "- remove other MONs, $nodename is not the last MON\n"
            if scalar(keys %$monstat) > 1 && $mon;

        # display all steps at once
        die "Unable to purge Ceph!\n\nTo continue:\n$message" if $message;

        my $services = PVE::Ceph::Services::get_local_services();
        $services->{mon} = $monstat if $mon;
        $services->{crash}->{$nodename} = { direxists => 1 } if $param->{crash};
        $services->{logs}->{$nodename} = { direxists => 1 } if $param->{logs};

        PVE::Ceph::Tools::purge_all_ceph_services($services);
        PVE::Ceph::Tools::purge_all_ceph_files($services);

        return undef;
    }});

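# Ceph releases installable via this CLI; reused for the parameter enum and the
# non-default release warning in the install method below.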
my $supported_ceph_versions = ['octopus', 'pacific', 'quincy'];
my $default_ceph_version = 'pacific';

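# Write the Proxmox Ceph repository entry for the selected release, install the packages
# non-interactively via apt, and reload the API daemons so the new librados is picked up.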
__PACKAGE__->register_method ({
    name => 'install',
    path => 'install',
    method => 'POST',
    description => "Install ceph related packages.",
    parameters => {
        additionalProperties => 0,
        properties => {
            version => {
                type => 'string',
                enum => $supported_ceph_versions,
                default => $default_ceph_version,
                description => "Ceph version to install.",
                optional => 1,
            },
            'allow-experimental' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Allow experimental versions. Use with care!",
            },
            'test-repository' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Use the test, not the main repository. Use with care!",
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $cephver = $param->{version} || $default_ceph_version;

        my $repo = $param->{'test-repository'} ? 'test' : 'main';

        my $repolist;
        if ($cephver eq 'octopus') {
            warn "Ceph Octopus will go EOL after 2022-07\n";
            $repolist = "deb http://download.proxmox.com/debian/ceph-octopus bullseye $repo\n";
        } elsif ($cephver eq 'pacific') {
            $repolist = "deb http://download.proxmox.com/debian/ceph-pacific bullseye $repo\n";
        } elsif ($cephver eq 'quincy') {
            $repolist = "deb http://download.proxmox.com/debian/ceph-quincy bullseye $repo\n";
        } else {
            die "unsupported ceph version: $cephver";
        }
        PVE::Tools::file_set_contents("/etc/apt/sources.list.d/ceph.list", $repolist);

        my $supported_re = join('|', $supported_ceph_versions->@*);
        warn "WARNING: installing non-default ceph release '$cephver'!\n" if $cephver !~ qr/^(?:$supported_re)$/;

        local $ENV{DEBIAN_FRONTEND} = 'noninteractive';
        print "update available package list\n";
        eval {
            run_command(
                ['apt-get', '-q', 'update'],
                outfunc => sub {},
                errfunc => sub { print STDERR "$_[0]\n" },
            )
        };

        my @apt_install = qw(apt-get --no-install-recommends -o Dpkg::Options::=--force-confnew install --);
        my @ceph_packages = qw(
            ceph
            ceph-common
            ceph-mds
            ceph-fuse
            gdisk
            nvme-cli
        );

        print "start installation\n";

        # this flag helps to determine when apt is actually done installing (vs. partial extracting)
        my $install_flag_fn = PVE::Ceph::Tools::ceph_install_flag_file();
        open(my $install_flag, '>', $install_flag_fn) or die "could not create install flag - $!\n";
        close $install_flag;

        if (system(@apt_install, @ceph_packages) != 0) {
            unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";
            die "apt failed during ceph installation ($?)\n";
        }

        print "\ninstalled ceph $cephver successfully!\n";
        # done: drop flag file so that the PVE::Ceph::Tools check returns Ok now.
        unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";

        print "\nreloading API to load new Ceph RADOS library...\n";
        run_command([
            'systemctl', 'try-reload-or-restart', 'pvedaemon.service', 'pveproxy.service'
        ]);

        return undef;
    }});

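# `pveceph status` is a thin wrapper that streams the output of `ceph -s`.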
__PACKAGE__->register_method ({
    name => 'status',
    path => 'status',
    method => 'GET',
    description => "Get Ceph Status.",
    parameters => {
        additionalProperties => 0,
    },
    returns => { type => 'null' },
    code => sub {
        PVE::Ceph::Tools::check_ceph_inited();

        run_command(
            ['ceph', '-s'],
            outfunc => sub { print "$_[0]\n" },
            errfunc => sub { print STDERR "$_[0]\n" },
            timeout => 15,
        );
        return undef;
    }});

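# Collect the cephfs entries from storage.cfg that reference the given fs name; entries
# without an explicit 'fs-name' only match when the fs is the cluster default.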
my $get_storages = sub {
    my ($fs, $is_default) = @_;

    my $cfg = PVE::Storage::config();

    my $storages = $cfg->{ids};
    my $res = {};
    foreach my $storeid (keys %$storages) {
        my $curr = $storages->{$storeid};
        next if $curr->{type} ne 'cephfs';
        my $cur_fs = $curr->{'fs-name'};
        $res->{$storeid} = $storages->{$storeid}
            if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
    }

    return $res;
};

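# Destroy a CephFS instance: optionally remove the matching, already disabled
# pveceph-managed storages first, then tear down the filesystem and, if requested,
# its metadata and data pools.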
__PACKAGE__->register_method ({
    name => 'destroyfs',
    path => 'destroyfs',
    method => 'DELETE',
    description => "Destroy a Ceph filesystem",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            name => {
                description => "The ceph filesystem name.",
                type => 'string',
            },
            'remove-storages' => {
                description => "Remove all pveceph-managed storages configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
            'remove-pools' => {
                description => "Remove data and metadata pools configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $rpcenv = PVE::RPCEnvironment::get();
        my $user = $rpcenv->get_user();

        my $fs_name = $param->{name};

        my $fs;
        my $fs_list = PVE::Ceph::Tools::ls_fs();
        for my $entry (@$fs_list) {
            next if $entry->{name} ne $fs_name;
            $fs = $entry;
            last;
        }
        die "no such cephfs '$fs_name'\n" if !$fs;

        my $worker = sub {
            my $rados = PVE::RADOS->new();

            if ($param->{'remove-storages'}) {
                my $defaultfs;
                my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
                for my $fs ($fs_dump->{filesystems}->@*) {
                    next if $fs->{id} != $fs_dump->{default_fscid};
                    $defaultfs = $fs->{mdsmap}->{fs_name};
                }
                warn "no default fs found, maybe not all relevant storages are removed\n"
                    if !defined($defaultfs);

                my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
                for my $storeid (keys %$storages) {
                    my $store = $storages->{$storeid};
                    if (!$store->{disable}) {
                        die "storage '$storeid' is not disabled, make sure to disable ".
                            "and unmount the storage first\n";
                    }
                }

                my $err;
                for my $storeid (keys %$storages) {
                    # skip external clusters, not managed by pveceph
                    next if $storages->{$storeid}->{monhost};
                    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
                    if ($@) {
                        warn "failed to remove storage '$storeid': $@\n";
                        $err = 1;
                    }
                }
                die "failed to remove (some) storages - check log and remove manually!\n"
                    if $err;
            }

            PVE::Ceph::Tools::destroy_fs($fs_name, $rados);

            if ($param->{'remove-pools'}) {
                warn "removing metadata pool '$fs->{metadata_pool}'\n";
                eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
                warn "$@\n" if $@;

                foreach my $pool ($fs->{data_pools}->@*) {
                    warn "removing data pool '$pool'\n";
                    eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
                    warn "$@\n" if $@;
                }
            }

        };
        return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
    }});

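# CLI command table: map pveceph (sub)commands to the API methods implementing them;
# the old flat command names are kept as aliases of the grouped subcommands.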
our $cmddef = {
    init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
    pool => {
        ls => [ 'PVE::API2::Ceph::Pools', 'lspools', [], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema,
                [
                    'pool_name',
                    'size',
                    'min_size',
                    'pg_num',
                    'pg_num_min',
                    'pg_num_final',
                    'pg_autoscale_mode',
                    'target_size',
                    'target_size_ratio',
                    'crush_rule_name',
                    'percent_used',
                    'bytes_used',
                ],
                $options);
        }, $PVE::RESTHandler::standard_output_options],
        create => [ 'PVE::API2::Ceph::Pools', 'createpool', ['name'], { node => $nodename }],
        destroy => [ 'PVE::API2::Ceph::Pools', 'destroypool', ['name'], { node => $nodename } ],
        set => [ 'PVE::API2::Ceph::Pools', 'setpool', ['name'], { node => $nodename } ],
        get => [ 'PVE::API2::Ceph::Pools', 'getpool', ['name'], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema, undef, $options);
        }, $PVE::RESTHandler::standard_output_options],
    },
    lspools => { alias => 'pool ls' },
    createpool => { alias => 'pool create' },
    destroypool => { alias => 'pool destroy' },
    fs => {
        create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
        destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
    },
    osd => {
        create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::OSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
    },
    createosd => { alias => 'osd create' },
    destroyosd => { alias => 'osd destroy' },
    mon => {
        create => [ 'PVE::API2::Ceph::MON', 'createmon', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MON', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
    },
    createmon => { alias => 'mon create' },
    destroymon => { alias => 'mon destroy' },
    mgr => {
        create => [ 'PVE::API2::Ceph::MGR', 'createmgr', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MGR', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
    },
    createmgr => { alias => 'mgr create' },
    destroymgr => { alias => 'mgr destroy' },
    mds => {
        create => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['name'], { node => $nodename }, $upid_exit],
    },
    start => [ 'PVE::API2::Ceph', 'start', [], { node => $nodename }, $upid_exit],
    stop => [ 'PVE::API2::Ceph', 'stop', [], { node => $nodename }, $upid_exit],
    install => [ __PACKAGE__, 'install', [] ],
    purge => [ __PACKAGE__, 'purge', [] ],
    status => [ __PACKAGE__, 'status', []],
};

1;