return undef;
}});
+# All Ceph releases installable through 'pveceph install'. Extend this list
+# (and bump $default_ceph_version when appropriate) for new upstream releases;
+# the install method's enum/default and its version checks all derive from these.
+my $supported_ceph_versions = ['octopus', 'pacific', 'quincy'];
+my $default_ceph_version = 'pacific';
+
__PACKAGE__->register_method ({
name => 'install',
path => 'install',
properties => {
version => {
type => 'string',
- enum => ['octopus', 'pacific'],
- default => 'pacific',
+ enum => $supported_ceph_versions,
+ default => $default_ceph_version,
description => "Ceph version to install.",
optional => 1,
},
code => sub {
my ($param) = @_;
- my $cephver = $param->{version} || 'pacific'; # NOTE: always change default here too!
+ my $cephver = $param->{version} || $default_ceph_version;
my $repo = $param->{'test-repository'} ? 'test' : 'main';
my $repolist;
if ($cephver eq 'octopus') {
+ warn "Ceph Octopus will go EOL after 2022-07\n";
$repolist = "deb http://download.proxmox.com/debian/ceph-octopus bullseye $repo\n";
} elsif ($cephver eq 'pacific') {
$repolist = "deb http://download.proxmox.com/debian/ceph-pacific bullseye $repo\n";
+ } elsif ($cephver eq 'quincy') {
+ $repolist = "deb http://download.proxmox.com/debian/ceph-quincy bullseye $repo\n";
} else {
die "unsupported ceph version: $cephver";
}
PVE::Tools::file_set_contents("/etc/apt/sources.list.d/ceph.list", $repolist);
- warn "WARNING: installing non-default ceph release '$cephver'!\n"
- if $cephver !~ qr/^(?:octopus|pacific)$/;
+ # Warn for any release other than the default one. Note: matching against the
+ # full supported list would be dead code, since unsupported versions already
+ # die above and the parameter enum only admits supported values anyway.
+ warn "WARNING: installing non-default ceph release '$cephver'!\n"
+ if $cephver ne $default_ceph_version;
local $ENV{DEBIAN_FRONTEND} = 'noninteractive';
print "update available package list\n";
nvme-cli
);
+ # got split out with quincy and is required by PVE tooling, conditionally exclude it for older
+ # FIXME: remove condition with PVE 8.0, i.e., once we only support quincy+ new installations
+ if ($cephver ne 'octopus' and $cephver ne 'pacific') {
+ push @ceph_packages, 'ceph-volume';
+ }
+
print "start installation\n";
- # the install flag helps to determine when apt is done installing
+ # this flag helps to determine when apt is actually done installing (vs. partial extracting)
my $install_flag_fn = PVE::Ceph::Tools::ceph_install_flag_file();
- open(my $install_flag, '>', "${install_flag_file}") or
- die "could not open Ceph installation flag - $!\n";
+ open(my $install_flag, '>', $install_flag_fn) or die "could not create install flag - $!\n";
close $install_flag;
if (system(@apt_install, @ceph_packages) != 0) {
- unlink $install_flag_file or
- warn "Could not remove Ceph installation flag - $!";
+ unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";
die "apt failed during ceph installation ($?)\n";
}
print "\ninstalled ceph $cephver successfully!\n";
+ # done: drop flag file so that the PVE::Ceph::Tools check returns Ok now.
+ unlink $install_flag_fn or warn "could not remove Ceph installation flag - $!";
print "\nreloading API to load new Ceph RADOS library...\n";
run_command([
'systemctl', 'try-reload-or-restart', 'pvedaemon.service', 'pveproxy.service'
]);
- unlink $install_flag_file or
- warn "Could not remove Ceph installation flag - $!";
-
return undef;
}});
return undef;
}});
+# Return all cephfs-type entries from storage.cfg that refer to the given fs:
+# either explicitly via their 'fs-name' option, or implicitly (no 'fs-name'
+# set) when $is_default is true, i.e., $fs is the cluster's default cephfs.
+# Returns a hashref of storeid => storage-config-section.
+my $get_storages = sub {
+ my ($fs, $is_default) = @_;
+
+ my $cfg = PVE::Storage::config();
+
+ my $storages = $cfg->{ids};
+ my $res = {};
+ foreach my $storeid (keys %$storages) {
+ my $curr = $storages->{$storeid};
+ next if $curr->{type} ne 'cephfs';
+ my $cur_fs = $curr->{'fs-name'};
+ # an entry without explicit fs-name mounts the default fs, so it only
+ # matches when the fs being looked up is the default one
+ $res->{$storeid} = $storages->{$storeid}
+ if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
+ }
+
+ return $res;
+};
+
+# Destroy a CephFS instance, optionally tearing down its pveceph-managed
+# storage.cfg entries and its backing data/metadata pools. Runs as a worker
+# task; storage removal is gated on all matching storages being disabled.
+__PACKAGE__->register_method ({
+ name => 'destroyfs',
+ path => 'destroyfs',
+ method => 'DELETE',
+ description => "Destroy a Ceph filesystem",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ name => {
+ description => "The ceph filesystem name.",
+ type => 'string',
+ },
+ 'remove-storages' => {
+ description => "Remove all pveceph-managed storages configured for this fs.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ 'remove-pools' => {
+ description => "Remove data and metadata pools configured for this fs.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::Ceph::Tools::check_ceph_inited();
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $user = $rpcenv->get_user();
+
+ my $fs_name = $param->{name};
+
+ # resolve the fs entry up front so a bogus name fails fast, before forking
+ # the worker; $fs is also needed later for the pool names
+ my $fs;
+ my $fs_list = PVE::Ceph::Tools::ls_fs();
+ for my $entry (@$fs_list) {
+ next if $entry->{name} ne $fs_name;
+ $fs = $entry;
+ last;
+ }
+ die "no such cephfs '$fs_name'\n" if !$fs;
+
+ my $worker = sub {
+ my $rados = PVE::RADOS->new();
+
+ if ($param->{'remove-storages'}) {
+ # figure out the cluster's default fs, so that storages without an
+ # explicit fs-name (which mount the default fs) can be matched too;
+ # NOTE: the loop variable below intentionally shadows the outer $fs
+ my $defaultfs;
+ my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
+ for my $fs ($fs_dump->{filesystems}->@*) {
+ next if $fs->{id} != $fs_dump->{default_fscid};
+ $defaultfs = $fs->{mdsmap}->{fs_name};
+ }
+ warn "no default fs found, maybe not all relevant storages are removed\n"
+ if !defined($defaultfs);
+
+ my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
+ # safety gate: refuse to proceed while any matching storage is enabled
+ for my $storeid (keys %$storages) {
+ my $store = $storages->{$storeid};
+ if (!$store->{disable}) {
+ die "storage '$storeid' is not disabled, make sure to disable ".
+ "and unmount the storage first\n";
+ }
+ }
+
+ # best-effort removal of each storage entry; collect failures and only
+ # abort (before destroying the fs) after trying all of them
+ my $err;
+ for my $storeid (keys %$storages) {
+ # skip external clusters, not managed by pveceph
+ next if $storages->{$storeid}->{monhost};
+ eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
+ if ($@) {
+ warn "failed to remove storage '$storeid': $@\n";
+ $err = 1;
+ }
+ }
+ die "failed to remove (some) storages - check log and remove manually!\n"
+ if $err;
+ }
+
+ PVE::Ceph::Tools::destroy_fs($fs_name, $rados);
+
+ if ($param->{'remove-pools'}) {
+ # pool destruction is best-effort: failures are warned about, not fatal
+ warn "removing metadata pool '$fs->{metadata_pool}'\n";
+ eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
+ warn "$@\n" if $@;
+
+ foreach my $pool ($fs->{data_pools}->@*) {
+ warn "removing data pool '$pool'\n";
+ eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
+ warn "$@\n" if $@;
+ }
+ }
+
+ };
+ return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
+ }});
+
our $cmddef = {
init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
pool => {
- ls => [ 'PVE::API2::Ceph::Pools', 'lspools', [], { node => $nodename }, sub {
+ ls => [ 'PVE::API2::Ceph::Pool', 'lspools', [], { node => $nodename }, sub {
my ($data, $schema, $options) = @_;
PVE::CLIFormatter::print_api_result($data, $schema,
[
],
$options);
}, $PVE::RESTHandler::standard_output_options],
- create => [ 'PVE::API2::Ceph::Pools', 'createpool', ['name'], { node => $nodename }],
- destroy => [ 'PVE::API2::Ceph::Pools', 'destroypool', ['name'], { node => $nodename } ],
- set => [ 'PVE::API2::Ceph::Pools', 'setpool', ['name'], { node => $nodename } ],
- get => [ 'PVE::API2::Ceph::Pools', 'getpool', ['name'], { node => $nodename }, sub {
+ create => [ 'PVE::API2::Ceph::Pool', 'createpool', ['name'], { node => $nodename }],
+ destroy => [ 'PVE::API2::Ceph::Pool', 'destroypool', ['name'], { node => $nodename } ],
+ set => [ 'PVE::API2::Ceph::Pool', 'setpool', ['name'], { node => $nodename } ],
+ get => [ 'PVE::API2::Ceph::Pool', 'getpool', ['name'], { node => $nodename }, sub {
my ($data, $schema, $options) = @_;
PVE::CLIFormatter::print_api_result($data, $schema, undef, $options);
}, $PVE::RESTHandler::standard_output_options],
destroypool => { alias => 'pool destroy' },
fs => {
create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
+ destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
},
osd => {
create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],