use strict;
use warnings;
use Cwd qw(abs_path);
+use Net::IP;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
use PVE::JSONSchema qw(get_standard_option);
use PVE::RADOS;
use PVE::CephTools;
+use PVE::Diskmanage;
use base qw(PVE::RESTHandler);
my $osdlist = $stat->{osds} || [];
+ my $flags = $stat->{flags} || undef;
+
my $osdstat;
foreach my $d (@$osdlist) {
$osdstat->{$d->{osd}} = $d if defined($d->{osd});
return $osdstat->{$osdid};
}
- return $osdstat;
+ return wantarray? ($osdstat, $flags):$osdstat;
};
my $get_osd_usage = sub {
description => "Get Ceph osd list/tree.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
die "no tree nodes found\n" if !($res && $res->{nodes});
- my $osdhash = &$get_osd_status($rados);
+ my ($osdhash, $flags) = &$get_osd_status($rados);
my $usagehash = &$get_osd_usage($rados);
}
}
- my $rootnode;
+ my $roots = [];
foreach my $e (@{$res->{nodes}}) {
if (!$nodes->{$e->{id}}->{parent}) {
- $rootnode = $newnodes->{$e->{id}};
- last;
+ push @$roots, $newnodes->{$e->{id}};
}
}
- die "no root node\n" if !$rootnode;
+ die "no root node\n" if !@$roots;
+
+ my $data = { root => { leaf => 0, children => $roots } };
- my $data = { root => $rootnode };
+ # we want this for the noout flag
+ $data->{flags} = $flags if $flags;
return $data;
}});
my $journal_dev;
if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
- $journal_dev = PVE::CephTools::verify_blockdev_path($param->{journal_dev});
+ $journal_dev = PVE::Diskmanage::verify_blockdev_path($param->{journal_dev});
}
- $param->{dev} = PVE::CephTools::verify_blockdev_path($param->{dev});
-
- my $disklist = PVE::CephTools::list_disks();
+ $param->{dev} = PVE::Diskmanage::verify_blockdev_path($param->{dev});
my $devname = $param->{dev};
$devname =~ s|/dev/||;
-
+
+ my $disklist = PVE::Diskmanage::get_disks($devname, 1);
+
my $diskinfo = $disklist->{$devname};
die "unable to get device info for '$devname'\n"
if !$diskinfo;
die "device '$param->{dev}' is in use\n"
if $diskinfo->{used};
+ my $devpath = $diskinfo->{devpath};
my $rados = PVE::RADOS->new();
my $monstat = $rados->mon_command({ prefix => 'mon_status' });
die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
if (! -f $ceph_bootstrap_osd_keyring) {
- my $bindata = $rados->mon_command({ prefix => 'auth get client.bootstrap-osd', format => 'plain' });
+ my $bindata = $rados->mon_command({ prefix => 'auth get', entity => 'client.bootstrap-osd', format => 'plain' });
PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
};
my $fstype = $param->{fstype} || 'xfs';
- print "create OSD on $param->{dev} ($fstype)\n";
+ print "create OSD on $devpath ($fstype)\n";
my $ccname = PVE::CephTools::get_config('ccname');
if ($journal_dev) {
print "using device '$journal_dev' for journal\n";
- push @$cmd, '--journal-dev', $param->{dev}, $journal_dev;
+ push @$cmd, '--journal-dev', $devpath, $journal_dev;
} else {
- push @$cmd, $param->{dev};
+ push @$cmd, $devpath;
}
run_command($cmd);
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
my $remove_partition = sub {
- my ($disklist, $part) = @_;
+ my ($part) = @_;
return if !$part || (! -b $part );
-
- foreach my $real_dev (keys %$disklist) {
- my $diskinfo = $disklist->{$real_dev};
- next if !$diskinfo->{gpt};
- if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
- my $partnum = $1;
- print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
- eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
- warn $@ if $@;
- last;
- }
- }
+ # resolve the partition number and its parent block device through the
+ # Diskmanage helpers instead of scanning a pre-fetched disk list
+ my $partnum = PVE::Diskmanage::get_partnum($part);
+ my $devpath = PVE::Diskmanage::get_blockdev($part);
+
+ # best-effort removal: failures from sgdisk are only warned about
+ print "remove partition $part (disk '${devpath}', partnum $partnum)\n";
+ eval { run_command(['/sbin/sgdisk', '-d', $partnum, "${devpath}"]); };
+ warn $@ if $@;
};
my $journal_part;
}
print "Unmount OSD $osdsection from $mountpoint\n";
- eval { run_command(['umount', $mountpoint]); };
+ eval { run_command(['/bin/umount', $mountpoint]); };
if (my $err = $@) {
warn $err;
} elsif ($param->{cleanup}) {
- my $disklist = PVE::CephTools::list_disks();
- &$remove_partition($disklist, $journal_part);
- &$remove_partition($disklist, $data_part);
+ #be aware of the ceph udev rules which can remount.
+ &$remove_partition($data_part);
+ &$remove_partition($journal_part);
}
};
description => "ceph osd in",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "ceph osd out",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
method => 'GET',
description => "Directory index.",
permissions => { user => 'all' },
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
{ name => 'config' },
{ name => 'log' },
{ name => 'disks' },
+ { name => 'flags' },
];
return $result;
description => "List local disks.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
PVE::CephTools::check_ceph_inited();
- my $disks = PVE::CephTools::list_disks();
+ my $disks = PVE::Diskmanage::get_disks(undef, 1);
my $res = [];
foreach my $dev (keys %$disks) {
name => 'config',
path => 'config',
method => 'GET',
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
description => "Get Ceph configuration.",
parameters => {
additionalProperties => 0,
description => "Get Ceph monitor list.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "Create initial ceph default configuration and setup symlinks.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
default => 2,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
pg_bits => {
- description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for deafult pools)",
+ description => "Placement group bits, used to specify the " .
+ "default number of placement groups.\n\nNOTE: 'osd pool " .
+ "default pg num' does not work for default pools.",
type => 'integer',
default => 6,
optional => 1,
minimum => 6,
maximum => 14,
},
+ disable_cephx => {
+ description => "Disable cephx authentification.\n\n" .
+ "WARNING: cephx is a security feature protecting against " .
+ "man-in-the-middle attacks. Only consider disabling cephx ".
+ "if your network is private!",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
},
},
returns => { type => 'null' },
UUID::generate($uuid);
UUID::unparse($uuid, $fsid);
+ my $auth = $param->{disable_cephx} ? 'none' : 'cephx';
+
$cfg->{global} = {
'fsid' => $fsid,
- 'auth supported' => 'cephx',
- 'auth cluster required' => 'cephx',
- 'auth service required' => 'cephx',
- 'auth client required' => 'cephx',
- 'filestore xattr use omap' => 'true',
+ 'auth cluster required' => $auth,
+ 'auth service required' => $auth,
+ 'auth client required' => $auth,
'osd journal size' => $pve_osd_default_journal_size,
'osd pool default min size' => 1,
+ 'mon allow pool delete' => 'true',
};
# this does not work for default pools
my $find_node_ip = sub {
my ($cidr) = @_;
- my $config = PVE::INotify::read_file('interfaces');
-
my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";
+ my $id = $net->version == 6 ? 'address6' : 'address';
- foreach my $iface (keys %$config) {
- my $d = $config->{$iface};
- next if !$d->{address};
- my $a = Net::IP->new($d->{address});
+ my $config = PVE::INotify::read_file('interfaces');
+ my $ifaces = $config->{ifaces};
+
+ foreach my $iface (keys %$ifaces) {
+ my $d = $ifaces->{$iface};
+ next if !$d->{$id};
+ my $a = Net::IP->new($d->{$id});
next if !$a;
- return $d->{address} if $net->overlaps($a);
+ return $d->{$id} if $net->overlaps($a);
}
die "unable to find local address within network '$cidr'\n";
description => "Create Ceph Monitor",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
my $monaddrhash = {};
+ my $systemd_managed = PVE::CephTools::systemd_managed();
+
foreach my $section (keys %$cfg) {
next if $section eq 'global';
my $d = $cfg->{$section};
$ip = PVE::Cluster::remote_node_ip($param->{node});
}
- my $monaddr = "$ip:6789";
+ my $monaddr = Net::IP::ip_is_ipv6($ip) ? "[$ip]:6789" : "$ip:6789";
my $monname = $param->{node};
die "monitor '$monsection' already exists\n" if $cfg->{$monsection};
"--cap mds 'allow' " .
"--cap osd 'allow *' " .
"--cap mon 'allow *'");
+ run_command("cp $pve_mon_key_path.tmp /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
+ run_command("chown ceph:ceph /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
run_command("ceph-authtool $pve_mon_key_path.tmp --gen-key -n mon. --cap mon 'allow *'");
run_command("mv $pve_mon_key_path.tmp $pve_mon_key_path");
}
eval {
mkdir $mondir;
+ run_command("chown ceph:ceph $mondir") if $systemd_managed;
+
if ($moncount > 0) {
my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' });
}
run_command("ceph-mon --mkfs -i $monid --monmap $monmap --keyring $pve_mon_key_path");
+ run_command("chown ceph:ceph -R $mondir") if $systemd_managed;
};
my $err = $@;
unlink $monmap;
PVE::CephTools::write_ceph_config($cfg);
- PVE::CephTools::ceph_service_cmd('start', $monsection);
+ my $create_keys_pid = fork();
+ if (!defined($create_keys_pid)) {
+ die "Could not spawn ceph-create-keys to create bootstrap keys\n";
+ } elsif ($create_keys_pid == 0) {
+ exit PVE::Tools::run_command(['ceph-create-keys', '-i', $monid]);
+ } else {
+ PVE::CephTools::ceph_service_cmd('start', $monsection);
+
+ if ($systemd_managed) {
+ #to ensure we have the correct startup order.
+ eval { PVE::Tools::run_command(['/bin/systemctl', 'enable', "ceph-mon\@${monid}.service"]); };
+ warn "Enable ceph-mon\@${monid}.service manually"if $@;
+ }
+ waitpid($create_keys_pid, 0);
+ }
};
return $rpcenv->fork_worker('cephcreatemon', $monsection, $authuser, $worker);
description => "Destroy Ceph monitor.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "Stop ceph services.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "Start ceph services.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "Get ceph status.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "List all pools.",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {
my $stats = {};
my $res = $rados->mon_command({ prefix => 'df' });
- my $total = $res->{stats}->{total_space} || 0;
- $total = $total * 1024;
+ my $total = $res->{stats}->{total_avail_bytes} || 0;
+
foreach my $d (@{$res->{pools}}) {
next if !$d->{stats};
next if !defined($d->{id});
}
if (my $s = $stats->{$d->{pool}}) {
$d->{bytes_used} = $s->{bytes_used};
- $d->{percent_used} = ($d->{bytes_used}*100)/$total if $total;
+ $d->{percent_used} = ($s->{bytes_used} / $total)*100
+ if $s->{max_avail} && $total;
}
push @$data, $d;
}
description => "Create POOL",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
default => 2,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
min_size => {
description => 'Minimum number of replicas per object',
default => 1,
optional => 1,
minimum => 1,
- maximum => 3,
+ maximum => 7,
},
pg_num => {
description => "Number of placement groups.",
return undef;
}});
+__PACKAGE__->register_method ({
+ name => 'get_flags',
+ path => 'flags',
+ method => 'GET',
+ description => "get all set ceph flags",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
+
+ # talking to the monitors requires the client keyring set up earlier
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+
+ my $rados = PVE::RADOS->new();
+
+ # the OSD map ('osd dump') carries the currently set cluster flags
+ # in its 'flags' field
+ my $stat = $rados->mon_command({ prefix => 'osd dump' });
+
+ # return an empty string (not undef) when no flags are set
+ return $stat->{flags} // '';
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'set_flag',
+ path => 'flags/{flag}',
+ method => 'POST',
+ description => "Set a ceph flag",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ flag => {
+ description => 'The ceph flag to set/unset',
+ type => 'string',
+ enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
+ },
+ },
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
+
+ # talking to the monitors requires the client keyring set up earlier
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+
+ my $rados = PVE::RADOS->new();
+
+ # 'osd set' expects the flag name in the 'key' argument
+ $rados->mon_command({
+ prefix => "osd set",
+ key => $param->{flag},
+ });
+
+ return undef;
+ }});
+
+__PACKAGE__->register_method ({
+ name => 'unset_flag',
+ path => 'flags/{flag}',
+ method => 'DELETE',
+ description => "Unset a ceph flag",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ flag => {
+ description => 'The ceph flag to set/unset',
+ type => 'string',
+ enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
+ },
+ },
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
+
+ # talking to the monitors requires the client keyring set up earlier
+ die "not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+
+ my $rados = PVE::RADOS->new();
+
+ # 'osd unset' expects the flag name in the 'key' argument
+ $rados->mon_command({
+ prefix => "osd unset",
+ key => $param->{flag},
+ });
+
+ return undef;
+ }});
+
__PACKAGE__->register_method ({
name => 'destroypool',
path => 'pools/{name}',
description => "Destroy pool",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
parameters => {
additionalProperties => 0,
properties => {
description => "The name of the pool. It must be unique.",
type => 'string',
},
+ force => {
+ description => "If true, destroys pool even if in use",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ }
},
},
returns => { type => 'null' },
PVE::CephTools::check_ceph_inited();
+ # if not forced, destroy ceph pool only when no
+ # vm disks are on it anymore
+ if (!$param->{force}) {
+ my $storagecfg = PVE::Storage::config();
+ foreach my $storageid (keys %{$storagecfg->{ids}}) {
+ my $storage = $storagecfg->{ids}->{$storageid};
+ next if $storage->{type} ne 'rbd';
+ next if $storage->{pool} ne $param->{name};
+
+ # check if any vm disks are on the pool
+ my $res = PVE::Storage::vdisk_list($storagecfg, $storageid);
+ die "ceph pool '$param->{name}' still in use by storage '$storageid'\n"
+ if @{$res->{$storageid}} != 0;
+ }
+ }
+
my $rados = PVE::RADOS->new();
# fixme: '--yes-i-really-really-mean-it'
$rados->mon_command({
description => "Get OSD crush map",
proxyto => 'node',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
parameters => {
additionalProperties => 0,
properties => {