package PVE::CLI::pveceph;

use strict;
use warnings;

use Fcntl ':flock';
use File::Path;
use IO::File;
use JSON;
use Data::Dumper;
use LWP::UserAgent;

use PVE::SafeSyslog;
use PVE::Cluster;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::Ceph::Tools;
use PVE::Ceph::Services;
use PVE::API2::Ceph;
use PVE::API2::Ceph::FS;
use PVE::API2::Ceph::MDS;
use PVE::API2::Ceph::MGR;
use PVE::API2::Ceph::MON;
use PVE::API2::Ceph::OSD;

use PVE::CLIHandler;

use base qw(PVE::CLIHandler);

my $nodename = PVE::INotify::nodename();

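# Exit helper for forked worker tasks: translate the task's final UPID status
# into a shell exit code (0 on success, -1 on error).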
my $upid_exit = sub {
    my $upid = shift;
    my $status = PVE::Tools::upid_read_status($upid);
    exit(PVE::Tools::upid_status_is_error($status) ? -1 : 0);
};

sub setup_environment {
    PVE::RPCEnvironment->setup_default_cli_env();
}

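# Purge refuses to run as long as this node still has active MONs, OSDs or MDS
# daemons, or while any pools exist, so data is only destroyed after the Ceph
# services have been torn down explicitly.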
__PACKAGE__->register_method ({
    name => 'purge',
    path => 'purge',
    method => 'POST',
    description => "Destroy ceph related data and configuration files.",
    parameters => {
        additionalProperties => 0,
        properties => {
            logs => {
                description => 'Additionally purge Ceph logs, /var/log/ceph.',
                type => 'boolean',
                optional => 1,
            },
            crash => {
                description => 'Additionally purge Ceph crash logs, /var/lib/ceph/crash.',
                type => 'boolean',
                optional => 1,
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $message;
        my $pools = [];
        my $monstat = {};
        my $mdsstat = {};
        my $osdstat = [];

        eval {
            my $rados = PVE::RADOS->new();
            $pools = PVE::Ceph::Tools::ls_pools(undef, $rados);
            $monstat = PVE::Ceph::Services::get_services_info('mon', undef, $rados);
            $mdsstat = PVE::Ceph::Services::get_services_info('mds', undef, $rados);
            $osdstat = $rados->mon_command({ prefix => 'osd metadata' });
        };
        warn "Error gathering ceph info, already purged? Message: $@" if $@;

        my $osd = grep { $_->{hostname} eq $nodename } @$osdstat;
        my $mds = grep { $mdsstat->{$_}->{host} eq $nodename } keys %$mdsstat;
        my $mon = grep { $monstat->{$_}->{host} eq $nodename } keys %$monstat;

        # no pools = no data
        $message .= "- remove pools, this will !!DESTROY DATA!!\n" if @$pools;
        $message .= "- remove active OSD on $nodename\n" if $osd;
        $message .= "- remove active MDS on $nodename\n" if $mds;
        $message .= "- remove other MONs, $nodename is not the last MON\n"
            if scalar(keys %$monstat) > 1 && $mon;

        # display all steps at once
        die "Unable to purge Ceph!\n\nTo continue:\n$message" if $message;

        my $services = PVE::Ceph::Services::get_local_services();
        $services->{mon} = $monstat if $mon;
        $services->{crash}->{$nodename} = { direxists => 1 } if $param->{crash};
        $services->{logs}->{$nodename} = { direxists => 1 } if $param->{logs};

        PVE::Ceph::Tools::purge_all_ceph_services($services);
        PVE::Ceph::Tools::purge_all_ceph_files($services);

        return undef;
    }});

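# Only Ceph Quincy is installable here; the special handling for Octopus and
# Pacific was dropped as those releases are unsupported with Proxmox VE 8.x.
# Example invocation: 'pveceph install --version quincy'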
my $supported_ceph_versions = ['quincy'];
my $default_ceph_version = 'quincy';

__PACKAGE__->register_method ({
    name => 'install',
    path => 'install',
    method => 'POST',
    description => "Install ceph related packages.",
    parameters => {
        additionalProperties => 0,
        properties => {
            version => {
                type => 'string',
                enum => $supported_ceph_versions,
                default => $default_ceph_version,
                description => "Ceph version to install.",
                optional => 1,
            },
            'allow-experimental' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Allow experimental versions. Use with care!",
            },
            'test-repository' => {
                type => 'boolean',
                default => 0,
                optional => 1,
                description => "Use the test, not the main repository. Use with care!",
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;

        my $cephver = $param->{version} || $default_ceph_version;

        my $repo = $param->{'test-repository'} ? 'test' : 'main';

        my $repolist;
        if ($cephver eq 'quincy') {
            $repolist = "deb http://download.proxmox.com/debian/ceph-quincy bookworm $repo\n";
        } else {
            die "unsupported ceph version: $cephver";
        }
        PVE::Tools::file_set_contents("/etc/apt/sources.list.d/ceph.list", $repolist);

        my $supported_re = join('|', $supported_ceph_versions->@*);
        warn "WARNING: installing non-default ceph release '$cephver'!\n"
            if $cephver !~ qr/^(?:$supported_re)$/;

        local $ENV{DEBIAN_FRONTEND} = 'noninteractive';
        print "update available package list\n";
        eval {
            run_command(
                ['apt-get', '-q', 'update'],
                outfunc => sub {},
                errfunc => sub { print STDERR "$_[0]\n" },
            )
        };

        my @apt_install = qw(apt-get --no-install-recommends -o Dpkg::Options::=--force-confnew install --);
        my @ceph_packages = qw(
            ceph
            ceph-common
            ceph-fuse
            ceph-mds
            ceph-volume
            gdisk
            nvme-cli
        );

        print "start installation\n";

        # this flag helps to determine when apt is actually done installing (vs. a partial extraction)
        my $install_flag_fn = PVE::Ceph::Tools::ceph_install_flag_file();
        open(my $install_flag, '>', $install_flag_fn) or die "could not create install flag - $!\n";
        close $install_flag;

        if (system(@apt_install, @ceph_packages) != 0) {
            unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";
            die "apt failed during ceph installation ($?)\n";
        }

        print "\ninstalled ceph $cephver successfully!\n";
        # done: drop flag file so that the PVE::Ceph::Tools check returns Ok now.
        unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";

        print "\nreloading API to load new Ceph RADOS library...\n";
        run_command([
            'systemctl', 'try-reload-or-restart', 'pvedaemon.service', 'pveproxy.service'
        ]);

        return undef;
    }});

__PACKAGE__->register_method ({
    name => 'status',
    path => 'status',
    method => 'GET',
    description => "Get Ceph Status.",
    parameters => {
        additionalProperties => 0,
    },
    returns => { type => 'null' },
    code => sub {
        PVE::Ceph::Tools::check_ceph_inited();

        run_command(
            ['ceph', '-s'],
            outfunc => sub { print "$_[0]\n" },
            errfunc => sub { print STDERR "$_[0]\n" },
            timeout => 15,
        );
        return undef;
    }});

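# Collect all configured cephfs storages belonging to the given FS name; a
# storage without an explicit 'fs-name' only matches when the FS in question
# is the cluster's default one.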
my $get_storages = sub {
    my ($fs, $is_default) = @_;

    my $cfg = PVE::Storage::config();

    my $storages = $cfg->{ids};
    my $res = {};
    foreach my $storeid (keys %$storages) {
        my $curr = $storages->{$storeid};
        next if $curr->{type} ne 'cephfs';
        my $cur_fs = $curr->{'fs-name'};
        $res->{$storeid} = $storages->{$storeid}
            if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
    }

    return $res;
};

__PACKAGE__->register_method ({
    name => 'destroyfs',
    path => 'destroyfs',
    method => 'DELETE',
    description => "Destroy a Ceph filesystem",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            name => {
                description => "The ceph filesystem name.",
                type => 'string',
            },
            'remove-storages' => {
                description => "Remove all pveceph-managed storages configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
            'remove-pools' => {
                description => "Remove data and metadata pools configured for this fs.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $rpcenv = PVE::RPCEnvironment::get();
        my $user = $rpcenv->get_user();

        my $fs_name = $param->{name};

        my $fs;
        my $fs_list = PVE::Ceph::Tools::ls_fs();
        for my $entry (@$fs_list) {
            next if $entry->{name} ne $fs_name;
            $fs = $entry;
            last;
        }
        die "no such cephfs '$fs_name'\n" if !$fs;

        my $worker = sub {
            my $rados = PVE::RADOS->new();

            if ($param->{'remove-storages'}) {
                my $defaultfs;
                my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
                for my $fs ($fs_dump->{filesystems}->@*) {
                    next if $fs->{id} != $fs_dump->{default_fscid};
                    $defaultfs = $fs->{mdsmap}->{fs_name};
                }
                warn "no default fs found, maybe not all relevant storages are removed\n"
                    if !defined($defaultfs);

                my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
                for my $storeid (keys %$storages) {
                    my $store = $storages->{$storeid};
                    if (!$store->{disable}) {
                        die "storage '$storeid' is not disabled, make sure to disable "
                            ."and unmount the storage first\n";
                    }
                }

                my $err;
                for my $storeid (keys %$storages) {
                    # skip external clusters, not managed by pveceph
                    next if $storages->{$storeid}->{monhost};
                    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
                    if ($@) {
                        warn "failed to remove storage '$storeid': $@\n";
                        $err = 1;
                    }
                }
                die "failed to remove (some) storages - check log and remove manually!\n"
                    if $err;
            }

            PVE::Ceph::Tools::destroy_fs($fs_name, $rados);

            if ($param->{'remove-pools'}) {
                warn "removing metadata pool '$fs->{metadata_pool}'\n";
                eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
                warn "$@\n" if $@;

                foreach my $pool ($fs->{data_pools}->@*) {
                    warn "removing data pool '$pool'\n";
                    eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
                    warn "$@\n" if $@;
                }
            }
        };
        return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
    }});

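# CLI command table: each entry maps a (sub)command to
# [handler class, API method name, positional parameters, fixed parameters,
# output/exit handler]; entries of the form { alias => '...' } keep the old
# flat command names working, e.g. 'pveceph createosd' for 'pveceph osd create'.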
our $cmddef = {
    init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
    pool => {
        ls => [ 'PVE::API2::Ceph::Pool', 'lspools', [], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema,
                [
                    'pool_name',
                    'size',
                    'min_size',
                    'pg_num',
                    'pg_num_min',
                    'pg_num_final',
                    'pg_autoscale_mode',
                    'target_size',
                    'target_size_ratio',
                    'crush_rule_name',
                    'percent_used',
                    'bytes_used',
                ],
                $options);
        }, $PVE::RESTHandler::standard_output_options],
        create => [ 'PVE::API2::Ceph::Pool', 'createpool', ['name'], { node => $nodename }],
        destroy => [ 'PVE::API2::Ceph::Pool', 'destroypool', ['name'], { node => $nodename } ],
        set => [ 'PVE::API2::Ceph::Pool', 'setpool', ['name'], { node => $nodename } ],
        get => [ 'PVE::API2::Ceph::Pool', 'getpool', ['name'], { node => $nodename }, sub {
            my ($data, $schema, $options) = @_;
            PVE::CLIFormatter::print_api_result($data, $schema, undef, $options);
        }, $PVE::RESTHandler::standard_output_options],
    },
    lspools => { alias => 'pool ls' },
    createpool => { alias => 'pool create' },
    destroypool => { alias => 'pool destroy' },
    fs => {
        create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
        destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
    },
    osd => {
        create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::OSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
    },
    createosd => { alias => 'osd create' },
    destroyosd => { alias => 'osd destroy' },
    mon => {
        create => [ 'PVE::API2::Ceph::MON', 'createmon', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MON', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
    },
    createmon => { alias => 'mon create' },
    destroymon => { alias => 'mon destroy' },
    mgr => {
        create => [ 'PVE::API2::Ceph::MGR', 'createmgr', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MGR', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
    },
    createmgr => { alias => 'mgr create' },
    destroymgr => { alias => 'mgr destroy' },
    mds => {
        create => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
        destroy => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['name'], { node => $nodename }, $upid_exit],
    },
    start => [ 'PVE::API2::Ceph', 'start', [], { node => $nodename }, $upid_exit],
    stop => [ 'PVE::API2::Ceph', 'stop', [], { node => $nodename }, $upid_exit],
    install => [ __PACKAGE__, 'install', [] ],
    purge => [ __PACKAGE__, 'purge', [] ],
    status => [ __PACKAGE__, 'status', []],
};

1;