-package PVE::API2::Ceph;
+package PVE::API2::CephOSD;
use strict;
use warnings;
-use File::Basename;
-use File::Path;
-use POSIX qw (LONG_MAX);
use Cwd qw(abs_path);
-use IO::Dir;
-use UUID;
use Net::IP;
use PVE::SafeSyslog;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::JSONSchema qw(get_standard_option);
-use JSON;
+use PVE::RADOS;
+use PVE::CephTools;
+use PVE::Diskmanage;
use base qw(PVE::RESTHandler);
use Data::Dumper; # fixme: remove
-my $ccname = 'ceph'; # ceph cluster name
-my $ceph_cfgdir = "/etc/ceph";
-my $pve_ceph_cfgpath = "/etc/pve/$ccname.conf";
-my $ceph_cfgpath = "$ceph_cfgdir/$ccname.conf";
-my $pve_mon_key_path = "/etc/pve/priv/$ccname.mon.keyring";
-my $pve_ckeyring_path = "/etc/pve/priv/$ccname.client.admin.keyring";
+my $get_osd_status = sub {
+ my ($rados, $osdid) = @_;
-my $ceph_bootstrap_osd_keyring = "/var/lib/ceph/bootstrap-osd/$ccname.keyring";
-my $ceph_bootstrap_mds_keyring = "/var/lib/ceph/bootstrap-mds/$ccname.keyring";
+ my $stat = $rados->mon_command({ prefix => 'osd dump' });
-my $ceph_bin = "/usr/bin/ceph";
+ my $osdlist = $stat->{osds} || [];
-my $pve_osd_default_journal_size = 1024*5;
+ my $flags = $stat->{flags} || undef;
-sub purge_all_ceph_files {
- # fixme: this is very dangerous - should we really support this function?
+ my $osdstat;
+ foreach my $d (@$osdlist) {
+ $osdstat->{$d->{osd}} = $d if defined($d->{osd});
+ }
+ if (defined($osdid)) {
+ die "no such OSD '$osdid'\n" if !$osdstat->{$osdid};
+ return $osdstat->{$osdid};
+ }
- unlink $ceph_cfgpath;
+ return wantarray? ($osdstat, $flags):$osdstat;
+};
- unlink $pve_ceph_cfgpath;
- unlink $pve_ckeyring_path;
- unlink $pve_mon_key_path;
+my $get_osd_usage = sub {
+ my ($rados) = @_;
- unlink $ceph_bootstrap_osd_keyring;
- unlink $ceph_bootstrap_mds_keyring;
+ my $osdlist = $rados->mon_command({ prefix => 'pg dump',
+ dumpcontents => [ 'osds' ]}) || [];
- system("rm -rf /var/lib/ceph/mon/ceph-*");
+ my $osdstat;
+ foreach my $d (@$osdlist) {
+ $osdstat->{$d->{osd}} = $d if defined($d->{osd});
+ }
- # remove osd?
+ return $osdstat;
+};
-}
+__PACKAGE__->register_method ({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "Get Ceph osd list/tree.",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ # fixme: return a list instead of extjs tree format ?
+ returns => {
+ type => "object",
+ },
+ code => sub {
+ my ($param) = @_;
-my $check_ceph_installed = sub {
- my ($noerr) = @_;
+ PVE::CephTools::check_ceph_inited();
- if (! -x $ceph_bin) {
- die "ceph binaries not installed\n" if !$noerr;
- return undef;
- }
+ my $rados = PVE::RADOS->new();
+ my $res = $rados->mon_command({ prefix => 'osd tree' });
- return 1;
-};
+ die "no tree nodes found\n" if !($res && $res->{nodes});
-my $check_ceph_inited = sub {
- my ($noerr) = @_;
+ my ($osdhash, $flags) = &$get_osd_status($rados);
- return undef if !&$check_ceph_installed($noerr);
+ my $usagehash = &$get_osd_usage($rados);
- if (! -f $pve_ceph_cfgpath) {
- die "pveceph configuration not initialized\n" if !$noerr;
- return undef;
- }
+ my $nodes = {};
+ my $newnodes = {};
+ foreach my $e (@{$res->{nodes}}) {
+ $nodes->{$e->{id}} = $e;
+
+ my $new = {
+ id => $e->{id},
+ name => $e->{name},
+ type => $e->{type}
+ };
- return 1;
-};
+ foreach my $opt (qw(status crush_weight reweight)) {
+ $new->{$opt} = $e->{$opt} if defined($e->{$opt});
+ }
-my $check_ceph_enabled = sub {
- my ($noerr) = @_;
+ if (my $stat = $osdhash->{$e->{id}}) {
+ $new->{in} = $stat->{in} if defined($stat->{in});
+ }
- return undef if !&$check_ceph_inited($noerr);
+ if (my $stat = $usagehash->{$e->{id}}) {
+ $new->{total_space} = ($stat->{kb} || 1) * 1024;
+ $new->{bytes_used} = ($stat->{kb_used} || 0) * 1024;
+ $new->{percent_used} = ($new->{bytes_used}*100)/$new->{total_space};
+ if (my $d = $stat->{fs_perf_stat}) {
+ $new->{commit_latency_ms} = $d->{commit_latency_ms};
+ $new->{apply_latency_ms} = $d->{apply_latency_ms};
+ }
+ }
- if (! -f $ceph_cfgpath) {
- die "pveceph configuration not enabled\n" if !$noerr;
- return undef;
- }
+ $newnodes->{$e->{id}} = $new;
+ }
- return 1;
-};
+ foreach my $e (@{$res->{nodes}}) {
+ my $new = $newnodes->{$e->{id}};
+ if ($e->{children} && scalar(@{$e->{children}})) {
+ $new->{children} = [];
+ $new->{leaf} = 0;
+ foreach my $cid (@{$e->{children}}) {
+ $nodes->{$cid}->{parent} = $e->{id};
+ if ($nodes->{$cid}->{type} eq 'osd' &&
+ $e->{type} eq 'host') {
+ $newnodes->{$cid}->{host} = $e->{name};
+ }
+ push @{$new->{children}}, $newnodes->{$cid};
+ }
+ } else {
+ $new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
+ }
+ }
+
+ my $roots = [];
+ foreach my $e (@{$res->{nodes}}) {
+ if (!$nodes->{$e->{id}}->{parent}) {
+ push @$roots, $newnodes->{$e->{id}};
+ }
+ }
+
+ die "no root node\n" if !@$roots;
+
+ my $data = { root => { leaf => 0, children => $roots } };
-my $parse_ceph_config = sub {
- my ($filename) = @_;
+ # we want this for the noout flag
+ $data->{flags} = $flags if $flags;
- my $cfg = {};
+ return $data;
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'createosd',
+ path => '',
+ method => 'POST',
+ description => "Create OSD",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ dev => {
+ description => "Block device name.",
+ type => 'string',
+ },
+ journal_dev => {
+ description => "Block device name for journal.",
+ optional => 1,
+ type => 'string',
+ },
+ fstype => {
+ description => "File system type.",
+ type => 'string',
+ enum => ['xfs', 'ext4', 'btrfs'],
+ default => 'xfs',
+ optional => 1,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
- return $cfg if ! -f $filename;
+ my $rpcenv = PVE::RPCEnvironment::get();
- my $fh = IO::File->new($filename, "r") ||
- die "unable to open '$filename' - $!\n";
+ my $authuser = $rpcenv->get_user();
- my $section;
+ PVE::CephTools::check_ceph_inited();
- while (defined(my $line = <$fh>)) {
- $line =~ s/[;#].*$//;
- $line =~ s/^\s+//;
- $line =~ s/\s+$//;
- next if !$line;
+ PVE::CephTools::setup_pve_symlinks();
- $section = $1 if $line =~ m/^\[(\S+)\]$/;
- if (!$section) {
- warn "no section - skip: $line\n";
- next;
- }
+ my $journal_dev;
- if ($line =~ m/^(.*\S)\s*=\s*(\S.*)$/) {
- $cfg->{$section}->{$1} = $2;
+ if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
+ $journal_dev = PVE::Diskmanage::verify_blockdev_path($param->{journal_dev});
}
- }
+ $param->{dev} = PVE::Diskmanage::verify_blockdev_path($param->{dev});
- return $cfg;
-};
+ my $devname = $param->{dev};
+ $devname =~ s|/dev/||;
-my $run_ceph_cmd = sub {
- my ($cmd, %params) = @_;
-
- my $timeout = 5;
+ my $disklist = PVE::Diskmanage::get_disks($devname, 1);
- run_command(['ceph', '-c', $pve_ceph_cfgpath,
- '--connect-timeout', $timeout,
- @$cmd], %params);
-};
+ my $diskinfo = $disklist->{$devname};
+ die "unable to get device info for '$devname'\n"
+ if !$diskinfo;
-my $run_ceph_cmd_text = sub {
- my ($cmd, %opts) = @_;
+ die "device '$param->{dev}' is in use\n"
+ if $diskinfo->{used};
- my $out = '';
+ my $devpath = $diskinfo->{devpath};
+ my $rados = PVE::RADOS->new();
+ my $monstat = $rados->mon_command({ prefix => 'mon_status' });
+ die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
- my $quiet = delete $opts{quiet};
+ my $fsid = $monstat->{monmap}->{fsid};
+ $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
- my $parser = sub {
- my $line = shift;
- $out .= "$line\n";
- };
+ my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
- my $errfunc = sub {
- my $line = shift;
- print "$line\n" if !$quiet;
- };
+ if (! -f $ceph_bootstrap_osd_keyring) {
+ my $bindata = $rados->mon_command({ prefix => 'auth get', entity => 'client.bootstrap-osd', format => 'plain' });
+ PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
+ };
+
+ my $worker = sub {
+ my $upid = shift;
- &$run_ceph_cmd($cmd, outfunc => $parser, errfunc => $errfunc);
+ my $fstype = $param->{fstype} || 'xfs';
- return $out;
-};
+ print "create OSD on $devpath ($fstype)\n";
-my $run_ceph_cmd_json = sub {
- my ($cmd, %opts) = @_;
+ my $ccname = PVE::CephTools::get_config('ccname');
- my $json = &$run_ceph_cmd_text([@$cmd, '--format', 'json'], %opts);
+ my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype,
+ '--cluster', $ccname, '--cluster-uuid', $fsid ];
- return decode_json($json);
-};
+ if ($journal_dev) {
+ print "using device '$journal_dev' for journal\n";
+ push @$cmd, '--journal-dev', $devpath, $journal_dev;
+ } else {
+ push @$cmd, $devpath;
+ }
+
+ run_command($cmd);
+ };
-sub ceph_mon_status {
- my ($quiet) = @_;
-
- return &$run_ceph_cmd_json(['mon_status'], quiet => $quiet);
+ return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
+ }});
-}
+__PACKAGE__->register_method ({
+ name => 'destroyosd',
+ path => '{osdid}',
+ method => 'DELETE',
+ description => "Destroy OSD",
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ osdid => {
+ description => 'OSD ID',
+ type => 'integer',
+ },
+ cleanup => {
+ description => "If set, we remove partition table entries.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
-my $ceph_osd_status = sub {
- my ($quiet) = @_;
+ my $rpcenv = PVE::RPCEnvironment::get();
- return &$run_ceph_cmd_json(['osd', 'dump'], quiet => $quiet);
-};
+ my $authuser = $rpcenv->get_user();
-my $write_ceph_config = sub {
- my ($cfg) = @_;
+ PVE::CephTools::check_ceph_inited();
- my $out = '';
+ my $osdid = $param->{osdid};
- my $cond_write_sec = sub {
- my $re = shift;
+ my $rados = PVE::RADOS->new();
+ my $osdstat = &$get_osd_status($rados, $osdid);
- foreach my $section (keys %$cfg) {
- next if $section !~ m/^$re$/;
- $out .= "[$section]\n";
- foreach my $key (sort keys %{$cfg->{$section}}) {
- $out .= "\t $key = $cfg->{$section}->{$key}\n";
- }
- $out .= "\n";
- }
- };
+ die "osd is in use (in == 1)\n" if $osdstat->{in};
+ #&$run_ceph_cmd(['osd', 'out', $osdid]);
- &$cond_write_sec('global');
- &$cond_write_sec('mon');
- &$cond_write_sec('osd');
- &$cond_write_sec('mon\..*');
- &$cond_write_sec('osd\..*');
+    die "osd is still running (up == 1)\n" if $osdstat->{up};
- PVE::Tools::file_set_contents($pve_ceph_cfgpath, $out);
-};
+ my $osdsection = "osd.$osdid";
-my $setup_pve_symlinks = sub {
- # fail if we find a real file instead of a link
- if (-f $ceph_cfgpath) {
- my $lnk = readlink($ceph_cfgpath);
- die "file '$ceph_cfgpath' already exists\n"
- if !$lnk || $lnk ne $pve_ceph_cfgpath;
- } else {
- symlink($pve_ceph_cfgpath, $ceph_cfgpath) ||
- die "unable to create symlink '$ceph_cfgpath' - $!\n";
- }
-};
+ my $worker = sub {
+ my $upid = shift;
-my $ceph_service_cmd = sub {
- run_command(['service', 'ceph', '-c', $pve_ceph_cfgpath, @_]);
-};
+ # reopen with longer timeout
+ $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
-sub list_disks {
- my $disklist = {};
-
- my $fd = IO::File->new("/proc/mounts", "r") ||
- die "unable to open /proc/mounts - $!\n";
+ print "destroy OSD $osdsection\n";
- my $mounted = {};
+ eval { PVE::CephTools::ceph_service_cmd('stop', $osdsection); };
+ warn $@ if $@;
- while (defined(my $line = <$fd>)) {
- my ($dev, $path, $fstype) = split(/\s+/, $line);
- next if !($dev && $path && $fstype);
- next if $dev !~ m|^/dev/|;
- my $real_dev = abs_path($dev);
- $mounted->{$real_dev} = $path;
- }
- close($fd);
+ print "Remove $osdsection from the CRUSH map\n";
+ $rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
- my $dev_is_mounted = sub {
- my ($dev) = @_;
- return $mounted->{$dev};
- };
+ print "Remove the $osdsection authentication key.\n";
+ $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
- my $dir_is_epmty = sub {
- my ($dir) = @_;
+ print "Remove OSD $osdsection\n";
+ $rados->mon_command({ prefix => "osd rm", ids => [ $osdsection ], format => 'plain' });
- my $dh = IO::Dir->new ($dir);
- return 1 if !$dh;
-
- while (defined(my $tmp = $dh->read)) {
- next if $tmp eq '.' || $tmp eq '..';
- $dh->close;
- return 0;
- }
- $dh->close;
- return 1;
- };
-
- my $journal_uuid = '45b0969e-9b03-4f30-b4c6-b4b80ceff106';
-
- my $journalhash = {};
- dir_glob_foreach('/dev/disk/by-parttypeuuid', "$journal_uuid\..+", sub {
- my ($entry) = @_;
- my $real_dev = abs_path("/dev/disk/by-parttypeuuid/$entry");
- $journalhash->{$real_dev} = 1;
- });
-
- dir_glob_foreach('/sys/block', '.*', sub {
- my ($dev) = @_;
-
- return if $dev eq '.';
- return if $dev eq '..';
-
- return if $dev =~ m|^ram\d+$|; # skip ram devices
- return if $dev =~ m|^loop\d+$|; # skip loop devices
- return if $dev =~ m|^md\d+$|; # skip md devices
- return if $dev =~ m|^dm-.*$|; # skip dm related things
- return if $dev =~ m|^fd\d+$|; # skip Floppy
- return if $dev =~ m|^sr\d+$|; # skip CDs
-
- my $devdir = "/sys/block/$dev/device";
- return if ! -d $devdir;
-
- my $size = file_read_firstline("/sys/block/$dev/size");
- return if !$size;
+ # try to unmount from standard mount point
+ my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
- $size = $size * 512;
+ my $remove_partition = sub {
+ my ($part) = @_;
- my $info = `udevadm info --path /sys/block/$dev --query all`;
- return if !$info;
+ return if !$part || (! -b $part );
+ my $partnum = PVE::Diskmanage::get_partnum($part);
+ my $devpath = PVE::Diskmanage::get_blockdev($part);
- return if $info !~ m/^E: DEVTYPE=disk$/m;
- return if $info =~ m/^E: ID_CDROM/m;
+ print "remove partition $part (disk '${devpath}', partnum $partnum)\n";
+ eval { run_command(['/sbin/sgdisk', '-d', $partnum, "${devpath}"]); };
+ warn $@ if $@;
+ };
- my $serial = 'unknown';
- if ($info =~ m/^E: ID_SERIAL_SHORT=(\S+)$/m) {
- $serial = $1;
- }
+ my $journal_part;
+ my $data_part;
+
+ if ($param->{cleanup}) {
+ my $jpath = "$mountpoint/journal";
+ $journal_part = abs_path($jpath);
- my $gpt = 0;
- if ($info =~ m/^E: ID_PART_TABLE_TYPE=gpt$/m) {
- $gpt = 1;
- }
+ if (my $fd = IO::File->new("/proc/mounts", "r")) {
+ while (defined(my $line = <$fd>)) {
+ my ($dev, $path, $fstype) = split(/\s+/, $line);
+ next if !($dev && $path && $fstype);
+ next if $dev !~ m|^/dev/|;
+ if ($path eq $mountpoint) {
+ $data_part = abs_path($dev);
+ last;
+ }
+ }
+ close($fd);
+ }
+ }
- # detect SSD (fixme - currently only works for ATA disks)
- my $rpm = 7200; # default guess
- if ($info =~ m/^E: ID_ATA_ROTATION_RATE_RPM=(\d+)$/m) {
- $rpm = $1;
- }
+ print "Unmount OSD $osdsection from $mountpoint\n";
+ eval { run_command(['/bin/umount', $mountpoint]); };
+ if (my $err = $@) {
+ warn $err;
+ } elsif ($param->{cleanup}) {
+ #be aware of the ceph udev rules which can remount.
+ &$remove_partition($data_part);
+ &$remove_partition($journal_part);
+ }
+ };
- my $vendor = file_read_firstline("$devdir/vendor") || 'unknown';
- my $model = file_read_firstline("$devdir/model") || 'unknown';
+ return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
+ }});
- my $used;
+__PACKAGE__->register_method ({
+ name => 'in',
+ path => '{osdid}/in',
+ method => 'POST',
+ description => "ceph osd in",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ osdid => {
+ description => 'OSD ID',
+ type => 'integer',
+ },
+ },
+ },
+ returns => { type => "null" },
+ code => sub {
+ my ($param) = @_;
- $used = 'LVM' if !&$dir_is_epmty("/sys/block/$dev/holders");
+ PVE::CephTools::check_ceph_inited();
- $used = 'mounted' if &$dev_is_mounted("/dev/$dev");
+ my $osdid = $param->{osdid};
- $disklist->{$dev} = {
- vendor => $vendor,
- model => $model,
- size => $size,
- serial => $serial,
- gpt => $gpt,
- rmp => $rpm,
- };
+ my $rados = PVE::RADOS->new();
- my $osdid = -1;
+ my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
- my $journal_count = 0;
+ my $osdsection = "osd.$osdid";
- my $found_partitions;
- my $found_lvm;
- my $found_mountpoints;
- dir_glob_foreach("/sys/block/$dev", "$dev.+", sub {
- my ($part) = @_;
+ $rados->mon_command({ prefix => "osd in", ids => [ $osdsection ], format => 'plain' });
- $found_partitions = 1;
+ return undef;
+ }});
- if (my $mp = &$dev_is_mounted("/dev/$part")) {
- $found_mountpoints = 1;
- if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) {
- $osdid = $1;
- }
- }
- if (!&$dir_is_epmty("/sys/block/$dev/$part/holders")) {
- $found_lvm = 1;
- }
- $journal_count++ if $journalhash->{"/dev/$part"};
- });
+__PACKAGE__->register_method ({
+ name => 'out',
+ path => '{osdid}/out',
+ method => 'POST',
+ description => "ceph osd out",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ osdid => {
+ description => 'OSD ID',
+ type => 'integer',
+ },
+ },
+ },
+ returns => { type => "null" },
+ code => sub {
+ my ($param) = @_;
- $used = 'mounted' if $found_mountpoints && !$used;
- $used = 'LVM' if $found_lvm && !$used;
- $used = 'partitions' if $found_partitions && !$used;
+ PVE::CephTools::check_ceph_inited();
- $disklist->{$dev}->{used} = $used if $used;
- $disklist->{$dev}->{osdid} = $osdid;
- $disklist->{$dev}->{journals} = $journal_count;
- });
+ my $osdid = $param->{osdid};
- return $disklist;
-}
+ my $rados = PVE::RADOS->new();
-my $lookup_diskinfo = sub {
- my ($disklist, $disk) = @_;
+ my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
- my $real_dev = abs_path($disk);
- $real_dev =~ s|/dev/||;
- my $diskinfo = $disklist->{$real_dev};
-
- die "disk '$disk' not found in disk list\n" if !$diskinfo;
+ my $osdsection = "osd.$osdid";
- return wantarray ? ($diskinfo, $real_dev) : $diskinfo;
-};
+ $rados->mon_command({ prefix => "osd out", ids => [ $osdsection ], format => 'plain' });
-
-my $count_journal_disks = sub {
- my ($disklist, $disk) = @_;
+ return undef;
+ }});
- my $count = 0;
+package PVE::API2::Ceph;
- my ($diskinfo, $real_dev) = &$lookup_diskinfo($disklist, $disk);
- die "journal disk '$disk' does not contain a GUID partition table\n"
- if !$diskinfo->{gpt};
+use strict;
+use warnings;
+use File::Basename;
+use File::Path;
+use POSIX qw (LONG_MAX);
+use Cwd qw(abs_path);
+use IO::Dir;
+use UUID;
+use Net::IP;
- $count = $diskinfo->{journals} if $diskinfo->{journals};
+use PVE::SafeSyslog;
+use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
+use PVE::Exception qw(raise raise_param_exc);
+use PVE::INotify;
+use PVE::Cluster qw(cfs_lock_file cfs_read_file cfs_write_file);
+use PVE::AccessControl;
+use PVE::Storage;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+use PVE::JSONSchema qw(get_standard_option);
+use JSON;
+use PVE::RADOS;
+use PVE::CephTools;
- return $count;
-};
+use base qw(PVE::RESTHandler);
+
+use Data::Dumper; # fixme: remove
+
+my $pve_osd_default_journal_size = 1024*5;
+
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::CephOSD",
+ path => 'osd',
+});
__PACKAGE__->register_method ({
name => 'index',
method => 'GET',
description => "Directory index.",
-    permissions => { user => 'all' },
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
{ name => 'config' },
{ name => 'log' },
{ name => 'disks' },
+ { name => 'flags' },
];
return $result;
description => "List local disks.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- my $disks = list_disks();
+ my $disks = PVE::Diskmanage::get_disks(undef, 1);
my $res = [];
foreach my $dev (keys %$disks) {
name => 'config',
path => 'config',
method => 'GET',
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
description => "Get Ceph configuration.",
parameters => {
additionalProperties => 0,
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- return PVE::Tools::file_get_contents($pve_ceph_cfgpath);
+ my $path = PVE::CephTools::get_config('pve_ceph_cfgpath');
+ return PVE::Tools::file_get_contents($path);
}});
description => "Get Ceph monitor list.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
my $res = [];
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
my $monhash = {};
foreach my $section (keys %$cfg) {
}
eval {
- my $monstat = ceph_mon_status();
+ my $rados = PVE::RADOS->new();
+ my $monstat = $rados->mon_command({ prefix => 'mon_status' });
my $mons = $monstat->{monmap}->{mons};
foreach my $d (@$mons) {
next if !defined($d->{name});
description => "Create initial ceph default configuration and setup symlinks.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
default => 2,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
pg_bits => {
- description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for deafult pools)",
+ description => "Placement group bits, used to specify the " .
+ "default number of placement groups.\n\nNOTE: 'osd pool " .
+ "default pg num' does not work for default pools.",
type => 'integer',
default => 6,
optional => 1,
minimum => 6,
maximum => 14,
},
+ disable_cephx => {
+	    description => "Disable cephx authentication.\n\n" .
+ "WARNING: cephx is a security feature protecting against " .
+ "man-in-the-middle attacks. Only consider disabling cephx ".
+ "if your network is private!",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
},
},
returns => { type => 'null' },
code => sub {
my ($param) = @_;
- &$check_ceph_installed();
+ PVE::CephTools::check_ceph_installed();
# simply load old config if it already exists
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
if (!$cfg->{global}) {
UUID::generate($uuid);
UUID::unparse($uuid, $fsid);
+ my $auth = $param->{disable_cephx} ? 'none' : 'cephx';
+
$cfg->{global} = {
'fsid' => $fsid,
- 'auth supported' => 'cephx',
- 'auth cluster required' => 'cephx',
- 'auth service required' => 'cephx',
- 'auth client required' => 'cephx',
- 'filestore xattr use omap' => 'true',
+ 'auth cluster required' => $auth,
+ 'auth service required' => $auth,
+ 'auth client required' => $auth,
'osd journal size' => $pve_osd_default_journal_size,
'osd pool default min size' => 1,
+ 'mon allow pool delete' => 'true',
};
# this does not work for default pools
$cfg->{global}->{'cluster network'} = $param->{network};
}
- &$write_ceph_config($cfg);
+ PVE::CephTools::write_ceph_config($cfg);
- &$setup_pve_symlinks();
+ PVE::CephTools::setup_pve_symlinks();
return undef;
}});
my $find_node_ip = sub {
my ($cidr) = @_;
- my $config = PVE::INotify::read_file('interfaces');
-
my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";
+ my $id = $net->version == 6 ? 'address6' : 'address';
+
+ my $config = PVE::INotify::read_file('interfaces');
+ my $ifaces = $config->{ifaces};
- foreach my $iface (keys %$config) {
- my $d = $config->{$iface};
- next if !$d->{address};
- my $a = Net::IP->new($d->{address});
+ foreach my $iface (keys %$ifaces) {
+ my $d = $ifaces->{$iface};
+ next if !$d->{$id};
+ my $a = Net::IP->new($d->{$id});
next if !$a;
- return $d->{address} if $net->overlaps($a);
+ return $d->{$id} if $net->overlaps($a);
}
die "unable to find local address within network '$cidr'\n";
description => "Create Ceph Monitor",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- &$setup_pve_symlinks();
+ PVE::CephTools::setup_pve_symlinks();
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
my $moncount = 0;
my $monaddrhash = {};
+ my $systemd_managed = PVE::CephTools::systemd_managed();
+
foreach my $section (keys %$cfg) {
next if $section eq 'global';
my $d = $cfg->{$section};
$ip = PVE::Cluster::remote_node_ip($param->{node});
}
- my $monaddr = "$ip:6789";
+ my $monaddr = Net::IP::ip_is_ipv6($ip) ? "[$ip]:6789" : "$ip:6789";
my $monname = $param->{node};
die "monitor '$monsection' already exists\n" if $cfg->{$monsection};
my $worker = sub {
my $upid = shift;
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
+
if (! -f $pve_ckeyring_path) {
run_command("ceph-authtool $pve_ckeyring_path --create-keyring " .
"--gen-key -n client.admin");
}
+ my $pve_mon_key_path = PVE::CephTools::get_config('pve_mon_key_path');
if (! -f $pve_mon_key_path) {
run_command("cp $pve_ckeyring_path $pve_mon_key_path.tmp");
run_command("ceph-authtool $pve_mon_key_path.tmp -n client.admin --set-uid=0 " .
"--cap mds 'allow' " .
"--cap osd 'allow *' " .
"--cap mon 'allow *'");
+ run_command("cp $pve_mon_key_path.tmp /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
+ run_command("chown ceph:ceph /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
run_command("ceph-authtool $pve_mon_key_path.tmp --gen-key -n mon. --cap mon 'allow *'");
run_command("mv $pve_mon_key_path.tmp $pve_mon_key_path");
}
+ my $ccname = PVE::CephTools::get_config('ccname');
+
my $mondir = "/var/lib/ceph/mon/$ccname-$monid";
-d $mondir && die "monitor filesystem '$mondir' already exist\n";
eval {
mkdir $mondir;
+ run_command("chown ceph:ceph $mondir") if $systemd_managed;
+
if ($moncount > 0) {
- my $monstat = ceph_mon_status(); # online test
- &$run_ceph_cmd(['mon', 'getmap', '-o', $monmap]);
+ my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
+ my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' });
+ PVE::Tools::file_set_contents($monmap, $mapdata);
} else {
run_command("monmaptool --create --clobber --add $monid $monaddr --print $monmap");
}
run_command("ceph-mon --mkfs -i $monid --monmap $monmap --keyring $pve_mon_key_path");
+ run_command("chown ceph:ceph -R $mondir") if $systemd_managed;
};
my $err = $@;
unlink $monmap;
'mon addr' => $monaddr,
};
- &$write_ceph_config($cfg);
+ PVE::CephTools::write_ceph_config($cfg);
+
+ my $create_keys_pid = fork();
+ if (!defined($create_keys_pid)) {
+ die "Could not spawn ceph-create-keys to create bootstrap keys\n";
+ } elsif ($create_keys_pid == 0) {
+ exit PVE::Tools::run_command(['ceph-create-keys', '-i', $monid]);
+ } else {
+ PVE::CephTools::ceph_service_cmd('start', $monsection);
- &$ceph_service_cmd('start', $monsection);
+ if ($systemd_managed) {
+ #to ensure we have the correct startup order.
+ eval { PVE::Tools::run_command(['/bin/systemctl', 'enable', "ceph-mon\@${monid}.service"]); };
+		warn "Enable ceph-mon\@${monid}.service manually" if $@;
+ }
+ waitpid($create_keys_pid, 0);
+ }
};
return $rpcenv->fork_worker('cephcreatemon', $monsection, $authuser, $worker);
description => "Destroy Ceph monitor.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
my $authuser = $rpcenv->get_user();
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
my $monid = $param->{monid};
my $monsection = "mon.$monid";
- my $monstat = ceph_mon_status();
+ my $rados = PVE::RADOS->new();
+ my $monstat = $rados->mon_command({ prefix => 'mon_status' });
my $monlist = $monstat->{monmap}->{mons};
die "no such monitor id '$monid'\n"
if !defined($cfg->{$monsection});
+ my $ccname = PVE::CephTools::get_config('ccname');
my $mondir = "/var/lib/ceph/mon/$ccname-$monid";
-d $mondir || die "monitor filesystem '$mondir' does not exist on this node\n";
my $worker = sub {
my $upid = shift;
- &$run_ceph_cmd(['mon', 'remove', $monid]);
+ # reopen with longer timeout
+ $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
+
+ $rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' });
- eval { &$ceph_service_cmd('stop', $monsection); };
+ eval { PVE::CephTools::ceph_service_cmd('stop', $monsection); };
warn $@ if $@;
delete $cfg->{$monsection};
- &$write_ceph_config($cfg);
+ PVE::CephTools::write_ceph_config($cfg);
File::Path::remove_tree($mondir);
};
description => "Stop ceph services.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
my $authuser = $rpcenv->get_user();
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
scalar(keys %$cfg) || die "no configuration\n";
my $worker = sub {
push @$cmd, $param->{service};
}
- &$ceph_service_cmd(@$cmd);
+ PVE::CephTools::ceph_service_cmd(@$cmd);
};
return $rpcenv->fork_worker('srvstop', $param->{service} || 'ceph',
description => "Start ceph services.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
my $authuser = $rpcenv->get_user();
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- my $cfg = &$parse_ceph_config($pve_ceph_cfgpath);
+ my $cfg = PVE::CephTools::parse_ceph_config();
scalar(keys %$cfg) || die "no configuration\n";
my $worker = sub {
push @$cmd, $param->{service};
}
- &$ceph_service_cmd(@$cmd);
+ PVE::CephTools::ceph_service_cmd(@$cmd);
};
return $rpcenv->fork_worker('srvstart', $param->{service} || 'ceph',
description => "Get ceph status.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_enabled();
+ PVE::CephTools::check_ceph_enabled();
- return &$run_ceph_cmd_json(['status'], quiet => 1);
+ my $rados = PVE::RADOS->new();
+ return $rados->mon_command({ prefix => 'status' });
}});
__PACKAGE__->register_method ({
description => "List all pools.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
+
+ my $rados = PVE::RADOS->new();
+
+ my $stats = {};
+ my $res = $rados->mon_command({ prefix => 'df' });
+ my $total = $res->{stats}->{total_avail_bytes} || 0;
- my $res = &$run_ceph_cmd_json(['osd', 'dump'], quiet => 1);
+ foreach my $d (@{$res->{pools}}) {
+ next if !$d->{stats};
+ next if !defined($d->{id});
+ $stats->{$d->{id}} = $d->{stats};
+ }
+
+ $res = $rados->mon_command({ prefix => 'osd dump' });
my $data = [];
foreach my $e (@{$res->{pools}}) {
foreach my $attr (qw(pool pool_name size min_size pg_num crush_ruleset)) {
$d->{$attr} = $e->{$attr} if defined($e->{$attr});
}
+ if (my $s = $stats->{$d->{pool}}) {
+ $d->{bytes_used} = $s->{bytes_used};
+ $d->{percent_used} = ($s->{bytes_used} / $total)*100
+ if $s->{max_avail} && $total;
+ }
push @$data, $d;
}
+
return $data;
}});
description => "Create POOL",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
default => 2,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
min_size => {
description => 'Minimum number of replicas per object',
default => 1,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
pg_num => {
description => "Number of placement groups.",
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
+
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
die "not fully configured - missing '$pve_ckeyring_path'\n"
if ! -f $pve_ckeyring_path;
my $pg_num = $param->{pg_num} || 64;
my $size = $param->{size} || 2;
my $min_size = $param->{min_size} || 1;
+ my $ruleset = $param->{crush_ruleset} || 0;
+ my $rados = PVE::RADOS->new();
+
+ $rados->mon_command({
+ prefix => "osd pool create",
+ pool => $param->{name},
+ pg_num => int($pg_num),
+# FIXME: passing pool properties at create time via 'properties' fails for an unknown reason,
+# properties => ["size=$size", "min_size=$min_size", "crush_ruleset=$ruleset"],
+ format => 'plain',
+ });
- &$run_ceph_cmd(['osd', 'pool', 'create', $param->{name}, $pg_num]);
-
- &$run_ceph_cmd(['osd', 'pool', 'set', $param->{name}, 'min_size', $min_size]);
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'min_size',
+ val => $min_size,
+ format => 'plain',
+ });
- &$run_ceph_cmd(['osd', 'pool', 'set', $param->{name}, 'size', $size]);
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'size',
+ val => $size,
+ format => 'plain',
+ });
if (defined($param->{crush_ruleset})) {
- &$run_ceph_cmd(['osd', 'pool', 'set', $param->{name}, 'crush_ruleset', $param->{crush_ruleset}]);
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $param->{name},
+ var => 'crush_ruleset',
+ val => $param->{crush_ruleset},
+ format => 'plain',
+ });
}
return undef;
}});
__PACKAGE__->register_method ({
- name => 'destroypool',
- path => 'pools/{name}',
- method => 'DELETE',
- description => "Destroy pool",
+ name => 'get_flags',
+ path => 'flags',
+ method => 'GET',
+ description => "get all set ceph flags",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit' ]],
+ },
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- name => {
- description => "The name of the pool. It must be unique.",
- type => 'string',
- },
},
},
- returns => { type => 'null' },
+ returns => { type => 'string' },
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
- &$run_ceph_cmd(['osd', 'pool', 'delete', $param->{name}, $param->{name}, '--yes-i-really-really-mean-it']);
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
- return undef;
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+
+ my $rados = PVE::RADOS->new();
+
+ my $stat = $rados->mon_command({ prefix => 'osd dump' });
+
+ return $stat->{flags} // '';
}});
__PACKAGE__->register_method ({
- name => 'listosd',
- path => 'osd',
- method => 'GET',
- description => "Get Ceph osd list/tree.",
+ name => 'set_flag',
+ path => 'flags/{flag}',
+ method => 'POST',
+ description => "Set a ceph flag",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ flag => {
+ description => 'The ceph flag to set/unset',
+ type => 'string',
+ enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
+ },
},
},
- returns => {
- type => "object",
- },
+ returns => { type => 'null' },
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
-
- my $res = &$run_ceph_cmd_json(['osd', 'tree'], quiet => 1);
-
- die "no tree nodes found\n" if !($res && $res->{nodes});
-
- my $nodes = {};
- my $newnodes = {};
- foreach my $e (@{$res->{nodes}}) {
- $nodes->{$e->{id}} = $e;
-
- my $new = {
- id => $e->{id},
- name => $e->{name},
- type => $e->{type}
- };
-
- foreach my $opt (qw(status crush_weight reweight)) {
- $new->{$opt} = $e->{$opt} if defined($e->{$opt});
- }
-
- $newnodes->{$e->{id}} = $new;
- }
+ PVE::CephTools::check_ceph_inited();
- foreach my $e (@{$res->{nodes}}) {
- my $new = $newnodes->{$e->{id}};
- if ($e->{children} && scalar(@{$e->{children}})) {
- $new->{children} = [];
- $new->{leaf} = 0;
- foreach my $cid (@{$e->{children}}) {
- $nodes->{$cid}->{parent} = $e->{id};
- if ($nodes->{$cid}->{type} eq 'osd' &&
- $e->{type} eq 'host') {
- $newnodes->{$cid}->{host} = $e->{name};
- }
- push @{$new->{children}}, $newnodes->{$cid};
- }
- } else {
- $new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
- }
- }
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
- my $rootnode;
- foreach my $e (@{$res->{nodes}}) {
- if (!$nodes->{$e->{id}}->{parent}) {
- $rootnode = $newnodes->{$e->{id}};
- last;
- }
- }
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
- die "no root node\n" if !$rootnode;
+ my $set = $param->{set} // !$param->{unset};
+ my $rados = PVE::RADOS->new();
- my $data = { root => $rootnode };
+ $rados->mon_command({
+ prefix => "osd set",
+ key => $param->{flag},
+ });
- return $data;
+ return undef;
}});
__PACKAGE__->register_method ({
- name => 'createosd',
- path => 'osd',
- method => 'POST',
- description => "Create OSD",
+ name => 'unset_flag',
+ path => 'flags/{flag}',
+ method => 'DELETE',
+ description => "Unset a ceph flag",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- dev => {
- description => "Block device name.",
- type => 'string',
- },
- journal_dev => {
- description => "Block device name for journal.",
- optional => 1,
- type => 'string',
- },
- fstype => {
- description => "File system type.",
+ flag => {
+ description => 'The ceph flag to set/unset',
type => 'string',
- enum => ['xfs', 'ext4', 'btrfs'],
- default => 'xfs',
- optional => 1,
+ enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
},
},
},
- returns => { type => 'string' },
+ returns => { type => 'null' },
code => sub {
my ($param) = @_;
- my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
-
- &$check_ceph_inited();
-
- &$setup_pve_symlinks();
-
- my $journal_dev;
-
- if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
- -b $param->{journal_dev} || die "no such block device '$param->{journal_dev}'\n";
- $journal_dev = $param->{journal_dev};
- }
-
- -b $param->{dev} || die "no such block device '$param->{dev}'\n";
-
- my $disklist = list_disks();
-
- my $devname = $param->{dev};
- $devname =~ s|/dev/||;
-
- my $diskinfo = $disklist->{$devname};
- die "unable to get device info for '$devname'\n"
- if !$diskinfo;
-
- die "device '$param->{dev}' is in use\n"
- if $diskinfo->{used};
-
- my $monstat = ceph_mon_status(1);
- die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
- my $fsid = $monstat->{monmap}->{fsid};
-
- if (! -f $ceph_bootstrap_osd_keyring) {
- &$run_ceph_cmd(['auth', 'get', 'client.bootstrap-osd', '-o', $ceph_bootstrap_osd_keyring]);
- };
-
- my $worker = sub {
- my $upid = shift;
+ PVE::CephTools::check_ceph_inited();
- my $fstype = $param->{fstype} || 'xfs';
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
- print "create OSD on $param->{dev} ($fstype)\n";
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
- my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype,
- '--cluster', $ccname, '--cluster-uuid', $fsid ];
+ my $set = $param->{set} // !$param->{unset};
+ my $rados = PVE::RADOS->new();
- if ($journal_dev) {
- print "using device '$journal_dev' for journal\n";
- push @$cmd, '--journal-dev', $param->{dev}, $journal_dev;
- } else {
- push @$cmd, $param->{dev};
- }
-
- run_command($cmd);
- };
+ $rados->mon_command({
+ prefix => "osd unset",
+ key => $param->{flag},
+ });
- return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
+ return undef;
}});
__PACKAGE__->register_method ({
- name => 'destroyosd',
- path => 'osd/{osdid}',
+ name => 'destroypool',
+ path => 'pools/{name}',
method => 'DELETE',
- description => "Destroy OSD",
+ description => "Destroy pool",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- osdid => {
- description => 'OSD ID',
- type => 'integer',
+ name => {
+ description => "The name of the pool. It must be unique.",
+ type => 'string',
},
- cleanup => {
- description => "If set, we remove partition table entries.",
+ force => {
+ description => "If true, destroys pool even if in use",
type => 'boolean',
optional => 1,
default => 0,
- },
+ }
},
},
- returns => { type => 'string' },
+ returns => { type => 'null' },
code => sub {
my ($param) = @_;
- my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
-
- &$check_ceph_inited();
-
- my $osdid = $param->{osdid};
-
- # fixme: not 100% sure what we should do here
-
- my $stat = &$ceph_osd_status();
-
- my $osdlist = $stat->{osds} || [];
-
- my $osdstat;
- foreach my $d (@$osdlist) {
- if ($d->{osd} == $osdid) {
- $osdstat = $d;
- last;
+ PVE::CephTools::check_ceph_inited();
+
+ # unless forced, only destroy the ceph pool if no
+ # VM disks remain on it
+ if (!$param->{force}) {
+ my $storagecfg = PVE::Storage::config();
+ foreach my $storageid (keys %{$storagecfg->{ids}}) {
+ my $storage = $storagecfg->{ids}->{$storageid};
+ next if $storage->{type} ne 'rbd';
+ next if $storage->{pool} ne $param->{name};
+
+ # check if any vm disks are on the pool
+ my $res = PVE::Storage::vdisk_list($storagecfg, $storageid);
+ die "ceph pool '$param->{name}' still in use by storage '$storageid'\n"
+ if @{$res->{$storageid}} != 0;
}
}
- die "no such OSD '$osdid'\n" if !$osdstat;
-
- die "osd is in use (in == 1)\n" if $osdstat->{in};
- #&$run_ceph_cmd(['osd', 'out', $osdid]);
-
- die "osd is still runnung (up == 1)\n" if $osdstat->{up};
-
- my $osdsection = "osd.$osdid";
-
- my $worker = sub {
- my $upid = shift;
-
- print "destroy OSD $osdsection\n";
-
- eval { &$ceph_service_cmd('stop', $osdsection); };
- warn $@ if $@;
-
- print "Remove $osdsection from the CRUSH map\n";
- &$run_ceph_cmd(['osd', 'crush', 'remove', $osdsection]);
-
- print "Remove the $osdsection authentication key.\n";
- &$run_ceph_cmd(['auth', 'del', $osdsection]);
-
- print "Remove OSD $osdsection\n";
- &$run_ceph_cmd(['osd', 'rm', $osdid]);
-
- # try to unmount from standard mount point
- my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
-
- my $remove_partition = sub {
- my ($disklist, $part) = @_;
-
- return if !$part || (! -b $part );
-
- foreach my $real_dev (keys %$disklist) {
- my $diskinfo = $disklist->{$real_dev};
- next if !$diskinfo->{gpt};
- if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
- my $partnum = $1;
- print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
- eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
- warn $@ if $@;
- last;
- }
- }
- };
-
- my $journal_part;
- my $data_part;
-
- if ($param->{cleanup}) {
- my $jpath = "$mountpoint/journal";
- $journal_part = abs_path($jpath);
-
- if (my $fd = IO::File->new("/proc/mounts", "r")) {
- while (defined(my $line = <$fd>)) {
- my ($dev, $path, $fstype) = split(/\s+/, $line);
- next if !($dev && $path && $fstype);
- next if $dev !~ m|^/dev/|;
- if ($path eq $mountpoint) {
- $data_part = abs_path($dev);
- last;
- }
- }
- close($fd);
- }
- }
- print "Unmount OSD $osdsection from $mountpoint\n";
- eval { run_command(['umount', $mountpoint]); };
- if (my $err = $@) {
- warn $err;
- } elsif ($param->{cleanup}) {
- my $disklist = list_disks();
- &$remove_partition($disklist, $journal_part);
- &$remove_partition($disklist, $data_part);
- }
- };
+ my $rados = PVE::RADOS->new();
+ # note: the '--yes-i-really-really-mean-it' safeguard is passed via the 'sure' key below
+ $rados->mon_command({
+ prefix => "osd pool delete",
+ pool => $param->{name},
+ pool2 => $param->{name},
+ sure => '--yes-i-really-really-mean-it',
+ format => 'plain',
+ });
- return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
+ return undef;
}});
+
__PACKAGE__->register_method ({
name => 'crush',
path => 'crush',
description => "Get OSD crush map",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
code => sub {
my ($param) = @_;
- &$check_ceph_inited();
+ PVE::CephTools::check_ceph_inited();
# this produces JSON (difficult to read for the user)
# my $txt = &$run_ceph_cmd_text(['osd', 'crush', 'dump'], quiet => 1);
my $mapfile = "/var/tmp/ceph-crush.map.$$";
my $mapdata = "/var/tmp/ceph-crush.txt.$$";
+ my $rados = PVE::RADOS->new();
+
eval {
- &$run_ceph_cmd(['osd', 'getcrushmap', '-o', $mapfile]);
+ my $bindata = $rados->mon_command({ prefix => 'osd getcrushmap', format => 'plain' });
+ PVE::Tools::file_set_contents($mapfile, $bindata);
run_command(['crushtool', '-d', $mapfile, '-o', $mapdata]);
$txt = PVE::Tools::file_get_contents($mapdata);
};