-package PVE::API2::Ceph;
+package PVE::API2::CephOSD;
use strict;
use warnings;
-use File::Basename;
-use File::Path;
-use POSIX qw (LONG_MAX);
use Cwd qw(abs_path);
-use IO::Dir;
-use UUID;
-use Net::IP;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::JSONSchema qw(get_standard_option);
-use JSON;
use PVE::RADOS;
use PVE::CephTools;
use Data::Dumper; # fixme: remove
+my $get_osd_status = sub {
+ my ($rados, $osdid) = @_;
-# we can use longer rados timeout when inside workers
-my $long_rados_timeout = 60;
+ my $stat = $rados->mon_command({ prefix => 'osd dump' });
-my $pve_osd_default_journal_size = 1024*5;
+ my $osdlist = $stat->{osds} || [];
+ my $osdstat;
+ foreach my $d (@$osdlist) {
+ $osdstat->{$d->{osd}} = $d if defined($d->{osd});
+ }
+ if (defined($osdid)) {
+ die "no such OSD '$osdid'\n" if !$osdstat->{$osdid};
+ return $osdstat->{$osdid};
+ }
-sub list_disks {
- my $disklist = {};
-
- my $fd = IO::File->new("/proc/mounts", "r") ||
- die "unable to open /proc/mounts - $!\n";
+ return $osdstat;
+};
- my $mounted = {};
+__PACKAGE__->register_method ({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "Get Ceph osd list/tree.",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ # fixme: return a list instead of extjs tree format ?
+ returns => {
+ type => "object",
+ },
+ code => sub {
+ my ($param) = @_;
- while (defined(my $line = <$fd>)) {
- my ($dev, $path, $fstype) = split(/\s+/, $line);
- next if !($dev && $path && $fstype);
- next if $dev !~ m|^/dev/|;
- my $real_dev = abs_path($dev);
- $mounted->{$real_dev} = $path;
- }
- close($fd);
+ PVE::CephTools::check_ceph_inited();
- my $dev_is_mounted = sub {
- my ($dev) = @_;
- return $mounted->{$dev};
- };
+ my $rados = PVE::RADOS->new();
+ my $res = $rados->mon_command({ prefix => 'osd tree' });
- my $dir_is_epmty = sub {
- my ($dir) = @_;
+ die "no tree nodes found\n" if !($res && $res->{nodes});
- my $dh = IO::Dir->new ($dir);
- return 1 if !$dh;
-
- while (defined(my $tmp = $dh->read)) {
- next if $tmp eq '.' || $tmp eq '..';
- $dh->close;
- return 0;
- }
- $dh->close;
- return 1;
- };
-
- my $journal_uuid = '45b0969e-9b03-4f30-b4c6-b4b80ceff106';
-
- my $journalhash = {};
- dir_glob_foreach('/dev/disk/by-parttypeuuid', "$journal_uuid\..+", sub {
- my ($entry) = @_;
- my $real_dev = abs_path("/dev/disk/by-parttypeuuid/$entry");
- $journalhash->{$real_dev} = 1;
- });
-
- dir_glob_foreach('/sys/block', '.*', sub {
- my ($dev) = @_;
-
- return if $dev eq '.';
- return if $dev eq '..';
-
- return if $dev =~ m|^ram\d+$|; # skip ram devices
- return if $dev =~ m|^loop\d+$|; # skip loop devices
- return if $dev =~ m|^md\d+$|; # skip md devices
- return if $dev =~ m|^dm-.*$|; # skip dm related things
- return if $dev =~ m|^fd\d+$|; # skip Floppy
- return if $dev =~ m|^sr\d+$|; # skip CDs
-
- my $devdir = "/sys/block/$dev/device";
- return if ! -d $devdir;
-
- my $size = file_read_firstline("/sys/block/$dev/size");
- return if !$size;
+ my $osdhash = &$get_osd_status($rados);
- $size = $size * 512;
+ my $nodes = {};
+ my $newnodes = {};
+ foreach my $e (@{$res->{nodes}}) {
+ $nodes->{$e->{id}} = $e;
+
+ my $new = {
+ id => $e->{id},
+ name => $e->{name},
+ type => $e->{type}
+ };
- my $info = `udevadm info --path /sys/block/$dev --query all`;
- return if !$info;
+ foreach my $opt (qw(status crush_weight reweight)) {
+ $new->{$opt} = $e->{$opt} if defined($e->{$opt});
+ }
- return if $info !~ m/^E: DEVTYPE=disk$/m;
- return if $info =~ m/^E: ID_CDROM/m;
+ if (my $stat = $osdhash->{$e->{id}}) {
+ $new->{in} = $stat->{in} if defined($stat->{in});
+ }
- my $serial = 'unknown';
- if ($info =~ m/^E: ID_SERIAL_SHORT=(\S+)$/m) {
- $serial = $1;
+ $newnodes->{$e->{id}} = $new;
}
- my $gpt = 0;
- if ($info =~ m/^E: ID_PART_TABLE_TYPE=gpt$/m) {
- $gpt = 1;
+ foreach my $e (@{$res->{nodes}}) {
+ my $new = $newnodes->{$e->{id}};
+ if ($e->{children} && scalar(@{$e->{children}})) {
+ $new->{children} = [];
+ $new->{leaf} = 0;
+ foreach my $cid (@{$e->{children}}) {
+ $nodes->{$cid}->{parent} = $e->{id};
+ if ($nodes->{$cid}->{type} eq 'osd' &&
+ $e->{type} eq 'host') {
+ $newnodes->{$cid}->{host} = $e->{name};
+ }
+ push @{$new->{children}}, $newnodes->{$cid};
+ }
+ } else {
+ $new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
+ }
}
- # detect SSD (fixme - currently only works for ATA disks)
- my $rpm = 7200; # default guess
- if ($info =~ m/^E: ID_ATA_ROTATION_RATE_RPM=(\d+)$/m) {
- $rpm = $1;
+ my $rootnode;
+ foreach my $e (@{$res->{nodes}}) {
+ if (!$nodes->{$e->{id}}->{parent}) {
+ $rootnode = $newnodes->{$e->{id}};
+ last;
+ }
}
- my $vendor = file_read_firstline("$devdir/vendor") || 'unknown';
- my $model = file_read_firstline("$devdir/model") || 'unknown';
+ die "no root node\n" if !$rootnode;
- my $used;
+ my $data = { root => $rootnode };
- $used = 'LVM' if !&$dir_is_epmty("/sys/block/$dev/holders");
+ return $data;
+ }});
- $used = 'mounted' if &$dev_is_mounted("/dev/$dev");
+__PACKAGE__->register_method ({
+ name => 'createosd',
+ path => '',
+ method => 'POST',
+ description => "Create OSD",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ dev => {
+ description => "Block device name.",
+ type => 'string',
+ },
+ journal_dev => {
+ description => "Block device name for journal.",
+ optional => 1,
+ type => 'string',
+ },
+ fstype => {
+ description => "File system type.",
+ type => 'string',
+ enum => ['xfs', 'ext4', 'btrfs'],
+ default => 'xfs',
+ optional => 1,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
- $disklist->{$dev} = {
- vendor => $vendor,
- model => $model,
- size => $size,
- serial => $serial,
- gpt => $gpt,
- rmp => $rpm,
- };
+ my $rpcenv = PVE::RPCEnvironment::get();
- my $osdid = -1;
+ my $authuser = $rpcenv->get_user();
- my $journal_count = 0;
+ PVE::CephTools::check_ceph_inited();
- my $found_partitions;
- my $found_lvm;
- my $found_mountpoints;
- dir_glob_foreach("/sys/block/$dev", "$dev.+", sub {
- my ($part) = @_;
+ PVE::CephTools::setup_pve_symlinks();
- $found_partitions = 1;
+ my $journal_dev;
- if (my $mp = &$dev_is_mounted("/dev/$part")) {
- $found_mountpoints = 1;
- if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) {
- $osdid = $1;
- }
- }
- if (!&$dir_is_epmty("/sys/block/$dev/$part/holders")) {
- $found_lvm = 1;
- }
- $journal_count++ if $journalhash->{"/dev/$part"};
- });
+ if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
+ $journal_dev = PVE::CephTools::verify_blockdev_path($param->{journal_dev});
+ }
- $used = 'mounted' if $found_mountpoints && !$used;
- $used = 'LVM' if $found_lvm && !$used;
- $used = 'partitions' if $found_partitions && !$used;
+ $param->{dev} = PVE::CephTools::verify_blockdev_path($param->{dev});
- $disklist->{$dev}->{used} = $used if $used;
- $disklist->{$dev}->{osdid} = $osdid;
- $disklist->{$dev}->{journals} = $journal_count;
- });
+ my $disklist = PVE::CephTools::list_disks();
- return $disklist;
-}
+ my $devname = $param->{dev};
+ $devname =~ s|/dev/||;
+
+ my $diskinfo = $disklist->{$devname};
+ die "unable to get device info for '$devname'\n"
+ if !$diskinfo;
-my $lookup_diskinfo = sub {
- my ($disklist, $disk) = @_;
+ die "device '$param->{dev}' is in use\n"
+ if $diskinfo->{used};
- my $real_dev = abs_path($disk);
- $real_dev =~ s|/dev/||;
- my $diskinfo = $disklist->{$real_dev};
-
- die "disk '$disk' not found in disk list\n" if !$diskinfo;
+ my $rados = PVE::RADOS->new();
+ my $monstat = $rados->mon_command({ prefix => 'mon_status' });
+ die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
- return wantarray ? ($diskinfo, $real_dev) : $diskinfo;
-};
+ my $fsid = $monstat->{monmap}->{fsid};
+ $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
-
-my $count_journal_disks = sub {
- my ($disklist, $disk) = @_;
+ my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
- my $count = 0;
+ if (! -f $ceph_bootstrap_osd_keyring) {
+ my $bindata = $rados->mon_command({ prefix => 'auth get client.bootstrap-osd', format => 'plain' });
+ PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
+ };
+
+ my $worker = sub {
+ my $upid = shift;
- my ($diskinfo, $real_dev) = &$lookup_diskinfo($disklist, $disk);
- die "journal disk '$disk' does not contain a GUID partition table\n"
- if !$diskinfo->{gpt};
+ my $fstype = $param->{fstype} || 'xfs';
- $count = $diskinfo->{journals} if $diskinfo->{journals};
+ print "create OSD on $param->{dev} ($fstype)\n";
- return $count;
-};
+ my $ccname = PVE::CephTools::get_config('ccname');
-__PACKAGE__->register_method ({
- name => 'index',
- path => '',
- method => 'GET',
- description => "Directory index.",
- permissions => { user => 'all' },
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- },
- },
- returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {},
- },
- links => [ { rel => 'child', href => "{name}" } ],
- },
- code => sub {
- my ($param) = @_;
+ my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype,
+ '--cluster', $ccname, '--cluster-uuid', $fsid ];
- my $result = [
- { name => 'init' },
- { name => 'mon' },
- { name => 'osd' },
- { name => 'pools' },
- { name => 'stop' },
- { name => 'start' },
- { name => 'status' },
- { name => 'crush' },
- { name => 'config' },
- { name => 'log' },
- { name => 'disks' },
- ];
+ if ($journal_dev) {
+ print "using device '$journal_dev' for journal\n";
+ push @$cmd, '--journal-dev', $param->{dev}, $journal_dev;
+ } else {
+ push @$cmd, $param->{dev};
+ }
+
+ run_command($cmd);
+ };
- return $result;
+ return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
}});
__PACKAGE__->register_method ({
- name => 'disks',
- path => 'disks',
- method => 'GET',
- description => "List local disks.",
+ name => 'destroyosd',
+ path => '{osdid}',
+ method => 'DELETE',
+ description => "Destroy OSD",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- type => {
- description => "Only list specific types of disks.",
- type => 'string',
- enum => ['unused', 'journal_disks'],
- optional => 1,
+ osdid => {
+ description => 'OSD ID',
+ type => 'integer',
},
- },
- },
- returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {
- dev => { type => 'string' },
- used => { type => 'string', optional => 1 },
- gpt => { type => 'boolean' },
- size => { type => 'integer' },
- osdid => { type => 'integer' },
- vendor => { type => 'string', optional => 1 },
- model => { type => 'string', optional => 1 },
- serial => { type => 'string', optional => 1 },
+ cleanup => {
+ description => "If set, we remove partition table entries.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
},
},
- # links => [ { rel => 'child', href => "{}" } ],
},
+ returns => { type => 'string' },
code => sub {
my ($param) = @_;
- PVE::CephTools::check_ceph_inited();
+ my $rpcenv = PVE::RPCEnvironment::get();
- my $disks = list_disks();
+ my $authuser = $rpcenv->get_user();
- my $res = [];
- foreach my $dev (keys %$disks) {
- my $d = $disks->{$dev};
- if ($param->{type}) {
- if ($param->{type} eq 'journal_disks') {
- next if $d->{osdid} >= 0;
- next if !$d->{gpt};
- } elsif ($param->{type} eq 'unused') {
- next if $d->{used};
- } else {
- die "internal error"; # should not happen
- }
- }
+ PVE::CephTools::check_ceph_inited();
- $d->{dev} = "/dev/$dev";
- push @$res, $d;
- }
+ my $osdid = $param->{osdid};
- return $res;
- }});
+ my $rados = PVE::RADOS->new();
+ my $osdstat = &$get_osd_status($rados, $osdid);
-__PACKAGE__->register_method ({
- name => 'config',
- path => 'config',
- method => 'GET',
- description => "Get Ceph configuration.",
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- },
- },
- returns => { type => 'string' },
- code => sub {
- my ($param) = @_;
+ die "osd is in use (in == 1)\n" if $osdstat->{in};
+ #&$run_ceph_cmd(['osd', 'out', $osdid]);
- PVE::CephTools::check_ceph_inited();
+    die "osd is still running (up == 1)\n" if $osdstat->{up};
- my $path = PVE::CephTools::get_config('pve_ceph_cfgpath');
- return PVE::Tools::file_get_contents($path);
+ my $osdsection = "osd.$osdid";
- }});
+ my $worker = sub {
+ my $upid = shift;
-__PACKAGE__->register_method ({
- name => 'listmon',
- path => 'mon',
- method => 'GET',
- description => "Get Ceph monitor list.",
- proxyto => 'node',
- protected => 1,
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- },
- },
- returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {
- name => { type => 'string' },
- addr => { type => 'string' },
- },
- },
- links => [ { rel => 'child', href => "{name}" } ],
- },
- code => sub {
- my ($param) = @_;
+ # reopen with longer timeout
+ $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
- PVE::CephTools::check_ceph_inited();
+ print "destroy OSD $osdsection\n";
- my $res = [];
+ eval { PVE::CephTools::ceph_service_cmd('stop', $osdsection); };
+ warn $@ if $@;
- my $cfg = PVE::CephTools::parse_ceph_config();
+ print "Remove $osdsection from the CRUSH map\n";
+ $rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
- my $monhash = {};
- foreach my $section (keys %$cfg) {
- my $d = $cfg->{$section};
- if ($section =~ m/^mon\.(\S+)$/) {
- my $monid = $1;
- if ($d->{'mon addr'} && $d->{'host'}) {
- $monhash->{$monid} = {
- addr => $d->{'mon addr'},
- host => $d->{'host'},
- name => $monid,
+ print "Remove the $osdsection authentication key.\n";
+ $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
+
+ print "Remove OSD $osdsection\n";
+ $rados->mon_command({ prefix => "osd rm", ids => [ $osdsection ], format => 'plain' });
+
+ # try to unmount from standard mount point
+ my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
+
+ my $remove_partition = sub {
+ my ($disklist, $part) = @_;
+
+ return if !$part || (! -b $part );
+
+ foreach my $real_dev (keys %$disklist) {
+ my $diskinfo = $disklist->{$real_dev};
+ next if !$diskinfo->{gpt};
+ if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
+ my $partnum = $1;
+ print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
+ eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
+ warn $@ if $@;
+ last;
}
}
- }
- }
+ };
- eval {
- my $rados = PVE::RADOS->new();
- my $monstat = $rados->mon_command({ prefix => 'mon_status' });
- my $mons = $monstat->{monmap}->{mons};
- foreach my $d (@$mons) {
- next if !defined($d->{name});
- $monhash->{$d->{name}}->{rank} = $d->{rank};
- $monhash->{$d->{name}}->{addr} = $d->{addr};
- if (grep { $_ eq $d->{rank} } @{$monstat->{quorum}}) {
- $monhash->{$d->{name}}->{quorum} = 1;
+ my $journal_part;
+ my $data_part;
+
+ if ($param->{cleanup}) {
+ my $jpath = "$mountpoint/journal";
+ $journal_part = abs_path($jpath);
+
+ if (my $fd = IO::File->new("/proc/mounts", "r")) {
+ while (defined(my $line = <$fd>)) {
+ my ($dev, $path, $fstype) = split(/\s+/, $line);
+ next if !($dev && $path && $fstype);
+ next if $dev !~ m|^/dev/|;
+ if ($path eq $mountpoint) {
+ $data_part = abs_path($dev);
+ last;
+ }
+ }
+ close($fd);
}
}
+
+ print "Unmount OSD $osdsection from $mountpoint\n";
+ eval { run_command(['umount', $mountpoint]); };
+ if (my $err = $@) {
+ warn $err;
+ } elsif ($param->{cleanup}) {
+ my $disklist = PVE::CephTools::list_disks();
+ &$remove_partition($disklist, $journal_part);
+ &$remove_partition($disklist, $data_part);
+ }
};
- warn $@ if $@;
- return PVE::RESTHandler::hash_to_array($monhash, 'name');
+ return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
}});
__PACKAGE__->register_method ({
- name => 'init',
- path => 'init',
+ name => 'in',
+ path => '{osdid}/in',
method => 'POST',
- description => "Create initial ceph default configuration and setup symlinks.",
+ description => "ceph osd in",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- network => {
- description => "Use specific network for all ceph related traffic",
- type => 'string', format => 'CIDR',
- optional => 1,
- maxLength => 128,
- },
- size => {
- description => 'Number of replicas per object',
- type => 'integer',
- default => 2,
- optional => 1,
- minimum => 1,
- maximum => 3,
- },
- pg_bits => {
- description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for deafult pools)",
+ osdid => {
+ description => 'OSD ID',
type => 'integer',
- default => 6,
- optional => 1,
- minimum => 6,
- maximum => 14,
},
},
},
- returns => { type => 'null' },
+ returns => { type => "null" },
code => sub {
my ($param) = @_;
- PVE::CephTools::check_ceph_installed();
-
- # simply load old config if it already exists
- my $cfg = PVE::CephTools::parse_ceph_config();
-
- if (!$cfg->{global}) {
-
- my $fsid;
- my $uuid;
-
- UUID::generate($uuid);
- UUID::unparse($uuid, $fsid);
-
- $cfg->{global} = {
- 'fsid' => $fsid,
- 'auth supported' => 'cephx',
- 'auth cluster required' => 'cephx',
- 'auth service required' => 'cephx',
- 'auth client required' => 'cephx',
- 'filestore xattr use omap' => 'true',
- 'osd journal size' => $pve_osd_default_journal_size,
- 'osd pool default min size' => 1,
- };
-
- # this does not work for default pools
- #'osd pool default pg num' => $pg_num,
- #'osd pool default pgp num' => $pg_num,
- }
-
- $cfg->{global}->{keyring} = '/etc/pve/priv/$cluster.$name.keyring';
- $cfg->{osd}->{keyring} = '/var/lib/ceph/osd/ceph-$id/keyring';
+ PVE::CephTools::check_ceph_inited();
- $cfg->{global}->{'osd pool default size'} = $param->{size} if $param->{size};
+ my $osdid = $param->{osdid};
- if ($param->{pg_bits}) {
- $cfg->{global}->{'osd pg bits'} = $param->{pg_bits};
- $cfg->{global}->{'osd pgp bits'} = $param->{pg_bits};
- }
+ my $rados = PVE::RADOS->new();
- if ($param->{network}) {
- $cfg->{global}->{'public network'} = $param->{network};
- $cfg->{global}->{'cluster network'} = $param->{network};
- }
+ my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
- PVE::CephTools::write_ceph_config($cfg);
+ my $osdsection = "osd.$osdid";
- PVE::CephTools::setup_pve_symlinks();
+ $rados->mon_command({ prefix => "osd in", ids => [ $osdsection ], format => 'plain' });
return undef;
}});
-my $find_node_ip = sub {
- my ($cidr) = @_;
-
- my $config = PVE::INotify::read_file('interfaces');
-
- my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";
-
- foreach my $iface (keys %$config) {
- my $d = $config->{$iface};
- next if !$d->{address};
- my $a = Net::IP->new($d->{address});
- next if !$a;
- return $d->{address} if $net->overlaps($a);
- }
-
- die "unable to find local address within network '$cidr'\n";
-};
-
__PACKAGE__->register_method ({
- name => 'createmon',
- path => 'mon',
+ name => 'out',
+ path => '{osdid}/out',
method => 'POST',
- description => "Create Ceph Monitor",
+ description => "ceph osd out",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ osdid => {
+ description => 'OSD ID',
+ type => 'integer',
+ },
},
},
- returns => { type => 'string' },
+ returns => { type => "null" },
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited();
- PVE::CephTools::setup_pve_symlinks();
+ my $osdid = $param->{osdid};
- my $rpcenv = PVE::RPCEnvironment::get();
+ my $rados = PVE::RADOS->new();
- my $authuser = $rpcenv->get_user();
+ my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
- my $cfg = PVE::CephTools::parse_ceph_config();
+ my $osdsection = "osd.$osdid";
- my $moncount = 0;
+ $rados->mon_command({ prefix => "osd out", ids => [ $osdsection ], format => 'plain' });
- my $monaddrhash = {};
+ return undef;
+ }});
- foreach my $section (keys %$cfg) {
- next if $section eq 'global';
- my $d = $cfg->{$section};
- if ($section =~ m/^mon\./) {
- $moncount++;
- if ($d->{'mon addr'}) {
- $monaddrhash->{$d->{'mon addr'}} = $section;
- }
- }
- }
+package PVE::API2::Ceph;
+
+use strict;
+use warnings;
+use File::Basename;
+use File::Path;
+use POSIX qw (LONG_MAX);
+use Cwd qw(abs_path);
+use IO::Dir;
+use UUID;
+use Net::IP;
+
+use PVE::SafeSyslog;
+use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
+use PVE::Exception qw(raise raise_param_exc);
+use PVE::INotify;
+use PVE::Cluster qw(cfs_lock_file cfs_read_file cfs_write_file);
+use PVE::AccessControl;
+use PVE::Storage;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+use PVE::JSONSchema qw(get_standard_option);
+use JSON;
+use PVE::RADOS;
+use PVE::CephTools;
+
+use base qw(PVE::RESTHandler);
+
+use Data::Dumper; # fixme: remove
+
+my $pve_osd_default_journal_size = 1024*5;
+
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::CephOSD",
+ path => 'osd',
+});
+
+__PACKAGE__->register_method ({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "Directory index.",
+ permissions => { user => 'all' },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {},
+ },
+ links => [ { rel => 'child', href => "{name}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $result = [
+ { name => 'init' },
+ { name => 'mon' },
+ { name => 'osd' },
+ { name => 'pools' },
+ { name => 'stop' },
+ { name => 'start' },
+ { name => 'status' },
+ { name => 'crush' },
+ { name => 'config' },
+ { name => 'log' },
+ { name => 'disks' },
+ ];
+
+ return $result;
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'disks',
+ path => 'disks',
+ method => 'GET',
+ description => "List local disks.",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ type => {
+ description => "Only list specific types of disks.",
+ type => 'string',
+ enum => ['unused', 'journal_disks'],
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {
+ dev => { type => 'string' },
+ used => { type => 'string', optional => 1 },
+ gpt => { type => 'boolean' },
+ size => { type => 'integer' },
+ osdid => { type => 'integer' },
+ vendor => { type => 'string', optional => 1 },
+ model => { type => 'string', optional => 1 },
+ serial => { type => 'string', optional => 1 },
+ },
+ },
+ # links => [ { rel => 'child', href => "{}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $disks = PVE::CephTools::list_disks();
+
+ my $res = [];
+ foreach my $dev (keys %$disks) {
+ my $d = $disks->{$dev};
+ if ($param->{type}) {
+ if ($param->{type} eq 'journal_disks') {
+ next if $d->{osdid} >= 0;
+ next if !$d->{gpt};
+ } elsif ($param->{type} eq 'unused') {
+ next if $d->{used};
+ } else {
+ die "internal error"; # should not happen
+ }
+ }
+
+ $d->{dev} = "/dev/$dev";
+ push @$res, $d;
+ }
+
+ return $res;
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'config',
+ path => 'config',
+ method => 'GET',
+ description => "Get Ceph configuration.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $path = PVE::CephTools::get_config('pve_ceph_cfgpath');
+ return PVE::Tools::file_get_contents($path);
+
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'listmon',
+ path => 'mon',
+ method => 'GET',
+ description => "Get Ceph monitor list.",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {
+ name => { type => 'string' },
+ addr => { type => 'string' },
+ },
+ },
+ links => [ { rel => 'child', href => "{name}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $res = [];
+
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ my $monhash = {};
+ foreach my $section (keys %$cfg) {
+ my $d = $cfg->{$section};
+ if ($section =~ m/^mon\.(\S+)$/) {
+ my $monid = $1;
+ if ($d->{'mon addr'} && $d->{'host'}) {
+ $monhash->{$monid} = {
+ addr => $d->{'mon addr'},
+ host => $d->{'host'},
+ name => $monid,
+ }
+ }
+ }
+ }
+
+ eval {
+ my $rados = PVE::RADOS->new();
+ my $monstat = $rados->mon_command({ prefix => 'mon_status' });
+ my $mons = $monstat->{monmap}->{mons};
+ foreach my $d (@$mons) {
+ next if !defined($d->{name});
+ $monhash->{$d->{name}}->{rank} = $d->{rank};
+ $monhash->{$d->{name}}->{addr} = $d->{addr};
+ if (grep { $_ eq $d->{rank} } @{$monstat->{quorum}}) {
+ $monhash->{$d->{name}}->{quorum} = 1;
+ }
+ }
+ };
+ warn $@ if $@;
+
+ return PVE::RESTHandler::hash_to_array($monhash, 'name');
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'init',
+ path => 'init',
+ method => 'POST',
+ description => "Create initial ceph default configuration and setup symlinks.",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ network => {
+ description => "Use specific network for all ceph related traffic",
+ type => 'string', format => 'CIDR',
+ optional => 1,
+ maxLength => 128,
+ },
+ size => {
+ description => 'Number of replicas per object',
+ type => 'integer',
+ default => 2,
+ optional => 1,
+ minimum => 1,
+ maximum => 3,
+ },
+ pg_bits => {
+	    description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for default pools)",
+ type => 'integer',
+ default => 6,
+ optional => 1,
+ minimum => 6,
+ maximum => 14,
+ },
+ },
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_installed();
+
+ # simply load old config if it already exists
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ if (!$cfg->{global}) {
+
+ my $fsid;
+ my $uuid;
+
+ UUID::generate($uuid);
+ UUID::unparse($uuid, $fsid);
+
+ $cfg->{global} = {
+ 'fsid' => $fsid,
+ 'auth supported' => 'cephx',
+ 'auth cluster required' => 'cephx',
+ 'auth service required' => 'cephx',
+ 'auth client required' => 'cephx',
+ 'filestore xattr use omap' => 'true',
+ 'osd journal size' => $pve_osd_default_journal_size,
+ 'osd pool default min size' => 1,
+ };
+
+ # this does not work for default pools
+ #'osd pool default pg num' => $pg_num,
+ #'osd pool default pgp num' => $pg_num,
+ }
+
+ $cfg->{global}->{keyring} = '/etc/pve/priv/$cluster.$name.keyring';
+ $cfg->{osd}->{keyring} = '/var/lib/ceph/osd/ceph-$id/keyring';
+
+ $cfg->{global}->{'osd pool default size'} = $param->{size} if $param->{size};
+
+ if ($param->{pg_bits}) {
+ $cfg->{global}->{'osd pg bits'} = $param->{pg_bits};
+ $cfg->{global}->{'osd pgp bits'} = $param->{pg_bits};
+ }
+
+ if ($param->{network}) {
+ $cfg->{global}->{'public network'} = $param->{network};
+ $cfg->{global}->{'cluster network'} = $param->{network};
+ }
+
+ PVE::CephTools::write_ceph_config($cfg);
+
+ PVE::CephTools::setup_pve_symlinks();
+
+ return undef;
+ }});
+
+my $find_node_ip = sub {
+ my ($cidr) = @_;
+
+ my $config = PVE::INotify::read_file('interfaces');
+
+ my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";
+
+ foreach my $iface (keys %$config) {
+ my $d = $config->{$iface};
+ next if !$d->{address};
+ my $a = Net::IP->new($d->{address});
+ next if !$a;
+ return $d->{address} if $net->overlaps($a);
+ }
+
+ die "unable to find local address within network '$cidr'\n";
+};
+
+__PACKAGE__->register_method ({
+ name => 'createmon',
+ path => 'mon',
+ method => 'POST',
+ description => "Create Ceph Monitor",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ PVE::CephTools::setup_pve_symlinks();
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ my $moncount = 0;
+
+ my $monaddrhash = {};
+
+ foreach my $section (keys %$cfg) {
+ next if $section eq 'global';
+ my $d = $cfg->{$section};
+ if ($section =~ m/^mon\./) {
+ $moncount++;
+ if ($d->{'mon addr'}) {
+ $monaddrhash->{$d->{'mon addr'}} = $section;
+ }
+ }
+ }
my $monid;
for (my $i = 0; $i < 7; $i++) {
mkdir $mondir;
if ($moncount > 0) {
- my $rados = PVE::RADOS->new(timeout => $long_rados_timeout);
+ my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' });
PVE::Tools::file_set_contents($monmap, $mapdata);
} else {
my $upid = shift;
# reopen with longer timeout
- $rados = PVE::RADOS->new(timeout => $long_rados_timeout);
+ $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
$rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' });
__PACKAGE__->register_method ({
name => 'createpool',
- path => 'pools',
- method => 'POST',
- description => "Create POOL",
- proxyto => 'node',
- protected => 1,
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- name => {
- description => "The name of the pool. It must be unique.",
- type => 'string',
- },
- size => {
- description => 'Number of replicas per object',
- type => 'integer',
- default => 2,
- optional => 1,
- minimum => 1,
- maximum => 3,
- },
- min_size => {
- description => 'Minimum number of replicas per object',
- type => 'integer',
- default => 1,
- optional => 1,
- minimum => 1,
- maximum => 3,
- },
- pg_num => {
- description => "Number of placement groups.",
- type => 'integer',
- default => 64,
- optional => 1,
- minimum => 8,
- maximum => 32768,
- },
- crush_ruleset => {
- description => "The ruleset to use for mapping object placement in the cluster.",
- type => 'integer',
- minimum => 0,
- maximum => 32768,
- default => 0,
- optional => 1,
- },
- },
- },
- returns => { type => 'null' },
- code => sub {
- my ($param) = @_;
-
- PVE::CephTools::check_ceph_inited();
-
- my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
-
- die "not fully configured - missing '$pve_ckeyring_path'\n"
- if ! -f $pve_ckeyring_path;
-
- my $pg_num = $param->{pg_num} || 64;
- my $size = $param->{size} || 2;
- my $min_size = $param->{min_size} || 1;
- my $ruleset = $param->{crush_ruleset} || 0;
- my $rados = PVE::RADOS->new();
-
- $rados->mon_command({
- prefix => "osd pool create",
- pool => $param->{name},
- pg_num => int($pg_num),
-# this does not work for unknown reason
-# properties => ["size=$size", "min_size=$min_size", "crush_ruleset=$ruleset"],
- format => 'plain',
- });
-
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $param->{name},
- var => 'min_size',
- val => $min_size,
- format => 'plain',
- });
-
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $param->{name},
- var => 'size',
- val => $size,
- format => 'plain',
- });
-
- if (defined($param->{crush_ruleset})) {
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $param->{name},
- var => 'crush_ruleset',
- val => $param->{crush_ruleset},
- format => 'plain',
- });
- }
-
- return undef;
- }});
-
-__PACKAGE__->register_method ({
- name => 'destroypool',
- path => 'pools/{name}',
- method => 'DELETE',
- description => "Destroy pool",
- proxyto => 'node',
- protected => 1,
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- name => {
- description => "The name of the pool. It must be unique.",
- type => 'string',
- },
- },
- },
- returns => { type => 'null' },
- code => sub {
- my ($param) = @_;
-
- PVE::CephTools::check_ceph_inited();
-
- my $rados = PVE::RADOS->new();
- # fixme: '--yes-i-really-really-mean-it'
- $rados->mon_command({
- prefix => "osd pool delete",
- pool => $param->{name},
- pool2 => $param->{name},
- sure => '--yes-i-really-really-mean-it',
- format => 'plain',
- });
-
- return undef;
- }});
-
-__PACKAGE__->register_method ({
- name => 'listosd',
- path => 'osd',
- method => 'GET',
- description => "Get Ceph osd list/tree.",
- proxyto => 'node',
- protected => 1,
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- },
- },
- returns => {
- type => "object",
- },
- code => sub {
- my ($param) = @_;
-
- PVE::CephTools::check_ceph_inited();
-
- my $rados = PVE::RADOS->new();
- my $res = $rados->mon_command({ prefix => 'osd tree' });
-
- die "no tree nodes found\n" if !($res && $res->{nodes});
-
- my $nodes = {};
- my $newnodes = {};
- foreach my $e (@{$res->{nodes}}) {
- $nodes->{$e->{id}} = $e;
-
- my $new = {
- id => $e->{id},
- name => $e->{name},
- type => $e->{type}
- };
-
- foreach my $opt (qw(status crush_weight reweight)) {
- $new->{$opt} = $e->{$opt} if defined($e->{$opt});
- }
-
- $newnodes->{$e->{id}} = $new;
- }
-
- foreach my $e (@{$res->{nodes}}) {
- my $new = $newnodes->{$e->{id}};
- if ($e->{children} && scalar(@{$e->{children}})) {
- $new->{children} = [];
- $new->{leaf} = 0;
- foreach my $cid (@{$e->{children}}) {
- $nodes->{$cid}->{parent} = $e->{id};
- if ($nodes->{$cid}->{type} eq 'osd' &&
- $e->{type} eq 'host') {
- $newnodes->{$cid}->{host} = $e->{name};
- }
- push @{$new->{children}}, $newnodes->{$cid};
- }
- } else {
- $new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
- }
- }
-
- my $rootnode;
- foreach my $e (@{$res->{nodes}}) {
- if (!$nodes->{$e->{id}}->{parent}) {
- $rootnode = $newnodes->{$e->{id}};
- last;
- }
- }
-
- die "no root node\n" if !$rootnode;
-
- my $data = { root => $rootnode };
-
- return $data;
- }});
-
-__PACKAGE__->register_method ({
- name => 'createosd',
- path => 'osd',
+ path => 'pools',
method => 'POST',
- description => "Create OSD",
+ description => "Create POOL",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- dev => {
- description => "Block device name.",
+ name => {
+ description => "The name of the pool. It must be unique.",
type => 'string',
},
- journal_dev => {
- description => "Block device name for journal.",
+ size => {
+ description => 'Number of replicas per object',
+ type => 'integer',
+ default => 2,
optional => 1,
- type => 'string',
+ minimum => 1,
+ maximum => 3,
},
- fstype => {
- description => "File system type.",
- type => 'string',
- enum => ['xfs', 'ext4', 'btrfs'],
- default => 'xfs',
+ min_size => {
+ description => 'Minimum number of replicas per object',
+ type => 'integer',
+ default => 1,
+ optional => 1,
+ minimum => 1,
+ maximum => 3,
+ },
+ pg_num => {
+ description => "Number of placement groups.",
+ type => 'integer',
+ default => 64,
+ optional => 1,
+ minimum => 8,
+ maximum => 32768,
+ },
+ crush_ruleset => {
+ description => "The ruleset to use for mapping object placement in the cluster.",
+ type => 'integer',
+ minimum => 0,
+ maximum => 32768,
+ default => 0,
optional => 1,
},
},
},
- returns => { type => 'string' },
+ returns => { type => 'null' },
code => sub {
my ($param) = @_;
- my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
-
PVE::CephTools::check_ceph_inited();
- PVE::CephTools::setup_pve_symlinks();
-
- my $journal_dev;
-
- if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
- $journal_dev = PVE::CephTools::verify_blockdev_path($param->{journal_dev});
- }
-
- $param->{dev} = PVE::CephTools::verify_blockdev_path($param->{dev});
-
- my $disklist = list_disks();
-
- my $devname = $param->{dev};
- $devname =~ s|/dev/||;
-
- my $diskinfo = $disklist->{$devname};
- die "unable to get device info for '$devname'\n"
- if !$diskinfo;
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
- die "device '$param->{dev}' is in use\n"
- if $diskinfo->{used};
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+ my $pg_num = $param->{pg_num} || 64;
+ my $size = $param->{size} || 2;
+ my $min_size = $param->{min_size} || 1;
+ my $ruleset = $param->{crush_ruleset} || 0;
my $rados = PVE::RADOS->new();
- my $monstat = $rados->mon_command({ prefix => 'mon_status' });
- die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
-
- my $fsid = $monstat->{monmap}->{fsid};
- $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
- my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
-
- if (! -f $ceph_bootstrap_osd_keyring) {
- my $bindata = $rados->mon_command({ prefix => 'auth get client.bootstrap-osd', format => 'plain' });
- PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
- };
-
- my $worker = sub {
- my $upid = shift;
-
- my $fstype = $param->{fstype} || 'xfs';
-
- print "create OSD on $param->{dev} ($fstype)\n";
+ $rados->mon_command({
+ prefix => "osd pool create",
+ pool => $param->{name},
+ pg_num => int($pg_num),
+# passing the properties inline at create time fails for an unknown reason,
+# properties => ["size=$size", "min_size=$min_size", "crush_ruleset=$ruleset"],
+ format => 'plain',
+ });
- my $ccname = PVE::CephTools::get_config('ccname');
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'min_size',
+ val => $min_size,
+ format => 'plain',
+ });
- my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype,
- '--cluster', $ccname, '--cluster-uuid', $fsid ];
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'size',
+ val => $size,
+ format => 'plain',
+ });
- if ($journal_dev) {
- print "using device '$journal_dev' for journal\n";
- push @$cmd, '--journal-dev', $param->{dev}, $journal_dev;
- } else {
- push @$cmd, $param->{dev};
- }
-
- run_command($cmd);
- };
+ if (defined($param->{crush_ruleset})) {
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'crush_ruleset',
+ val => $param->{crush_ruleset},
+ format => 'plain',
+ });
+ }
- return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
+ return undef;
}});
__PACKAGE__->register_method ({
- name => 'destroyosd',
- path => 'osd/{osdid}',
+ name => 'destroypool',
+ path => 'pools/{name}',
method => 'DELETE',
- description => "Destroy OSD",
+ description => "Destroy pool",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- osdid => {
- description => 'OSD ID',
- type => 'integer',
- },
- cleanup => {
- description => "If set, we remove partition table entries.",
- type => 'boolean',
- optional => 1,
- default => 0,
+ name => {
+ description => "The name of the pool. It must be unique.",
+ type => 'string',
},
},
},
- returns => { type => 'string' },
+ returns => { type => 'null' },
code => sub {
my ($param) = @_;
- my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
-
PVE::CephTools::check_ceph_inited();
- my $osdid = $param->{osdid};
-
- # fixme: not 100% sure what we should do here
-
my $rados = PVE::RADOS->new();
- my $stat = $rados->mon_command({ prefix => 'osd dump' });
-
- my $osdlist = $stat->{osds} || [];
-
- my $osdstat;
- foreach my $d (@$osdlist) {
- if ($d->{osd} == $osdid) {
- $osdstat = $d;
- last;
- }
- }
- die "no such OSD '$osdid'\n" if !$osdstat;
-
- die "osd is in use (in == 1)\n" if $osdstat->{in};
- #&$run_ceph_cmd(['osd', 'out', $osdid]);
-
- die "osd is still runnung (up == 1)\n" if $osdstat->{up};
-
- my $osdsection = "osd.$osdid";
-
- my $worker = sub {
- my $upid = shift;
-
- # reopen with longer timeout
- $rados = PVE::RADOS->new(timeout => $long_rados_timeout);
-
- print "destroy OSD $osdsection\n";
-
- eval { PVE::CephTools::ceph_service_cmd('stop', $osdsection); };
- warn $@ if $@;
-
- print "Remove $osdsection from the CRUSH map\n";
- $rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
-
- print "Remove the $osdsection authentication key.\n";
- $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
-
- print "Remove OSD $osdsection\n";
- $rados->mon_command({ prefix => "osd rm", ids => "$osdid", format => 'plain' });
-
- # try to unmount from standard mount point
- my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
-
- my $remove_partition = sub {
- my ($disklist, $part) = @_;
-
- return if !$part || (! -b $part );
-
- foreach my $real_dev (keys %$disklist) {
- my $diskinfo = $disklist->{$real_dev};
- next if !$diskinfo->{gpt};
- if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
- my $partnum = $1;
- print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
- eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
- warn $@ if $@;
- last;
- }
- }
- };
-
- my $journal_part;
- my $data_part;
-
- if ($param->{cleanup}) {
- my $jpath = "$mountpoint/journal";
- $journal_part = abs_path($jpath);
-
- if (my $fd = IO::File->new("/proc/mounts", "r")) {
- while (defined(my $line = <$fd>)) {
- my ($dev, $path, $fstype) = split(/\s+/, $line);
- next if !($dev && $path && $fstype);
- next if $dev !~ m|^/dev/|;
- if ($path eq $mountpoint) {
- $data_part = abs_path($dev);
- last;
- }
- }
- close($fd);
- }
- }
-
- print "Unmount OSD $osdsection from $mountpoint\n";
- eval { run_command(['umount', $mountpoint]); };
- if (my $err = $@) {
- warn $err;
- } elsif ($param->{cleanup}) {
- my $disklist = list_disks();
- &$remove_partition($disklist, $journal_part);
- &$remove_partition($disklist, $data_part);
- }
- };
+	# fixme: deletion is irreversible ('--yes-i-really-really-mean-it'); consider requiring an explicit confirmation parameter
+ $rados->mon_command({
+ prefix => "osd pool delete",
+ pool => $param->{name},
+ pool2 => $param->{name},
+ sure => '--yes-i-really-really-mean-it',
+ format => 'plain',
+ });
- return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
+ return undef;
}});