#include "OSDMap.h"
#include <algorithm>
#include "common/config.h"
+#include "common/errno.h"
#include "common/Formatter.h"
#include "common/TextTable.h"
#include "include/ceph_features.h"
return num_osd;
}
-void OSDMap::count_full_nearfull_osds(int *full, int *backfill, int *nearfull) const
+// Collect the ids of pools whose CRUSH rules can place data on an OSD that
+// is currently flagged full, backfillfull, or nearfull.  Supersedes
+// count_full_nearfull_osds(): callers now learn *which* pools are affected
+// rather than just how many OSDs are in each state.
+// @param cct          context, used for logging by get_pool_ids_by_osd()
+// @param full         out: pools touching at least one CEPH_OSD_FULL osd
+// @param backfillfull out: pools touching a CEPH_OSD_BACKFILLFULL osd
+// @param nearfull     out: pools touching a CEPH_OSD_NEARFULL osd
+void OSDMap::get_full_pools(CephContext *cct,
+ set<int64_t> *full,
+ set<int64_t> *backfillfull,
+ set<int64_t> *nearfull) const
{
- *full = 0;
- *backfill = 0;
- *nearfull = 0;
+ assert(full);
+ assert(backfillfull);
+ assert(nearfull);
+ // reset outputs up front; get_pool_ids_by_osd() only appends below
+ full->clear();
+ backfillfull->clear();
+ nearfull->clear();
+
+ // pass 1: bucket live (exists/up/in) OSDs by fullness state; the states
+ // are tested in severity order, so each OSD lands in at most one bucket
+ vector<int> full_osds;
+ vector<int> backfillfull_osds;
+ vector<int> nearfull_osds;
for (int i = 0; i < max_osd; ++i) {
if (exists(i) && is_up(i) && is_in(i)) {
if (osd_state[i] & CEPH_OSD_FULL)
- ++(*full);
+ full_osds.push_back(i);
else if (osd_state[i] & CEPH_OSD_BACKFILLFULL)
- ++(*backfill);
+ backfillfull_osds.push_back(i);
else if (osd_state[i] & CEPH_OSD_NEARFULL)
- ++(*nearfull);
+ nearfull_osds.push_back(i);
}
}
+
+ // pass 2: map each flagged OSD to the pools whose rules can use it,
+ // accumulating into the caller's sets (dupes collapse via set semantics)
+ for (auto i: full_osds) {
+ get_pool_ids_by_osd(cct, i, full);
+ }
+ for (auto i: backfillfull_osds) {
+ get_pool_ids_by_osd(cct, i, backfillfull);
+ }
+ for (auto i: nearfull_osds) {
+ get_pool_ids_by_osd(cct, i, nearfull);
+ }
}
static bool get_osd_utilization(
if (!is_up(osd))
continue;
const osd_xinfo_t &xi = get_xinfo(osd);
+ if (xi.features == 0)
+ continue; // bogus xinfo, maybe #20751 or similar, skipping
if (first) {
cached_up_osd_features = xi.features;
first = false;
out << " nearfull";
}
-bool OSDMap::crush_ruleset_in_use(int ruleset) const
+// Return true if any pool in this map references the given CRUSH rule id.
+// Renamed from crush_ruleset_in_use(): pools compare against the rule id
+// directly (pg_pool_t::crush_rule), not a legacy ruleset number.
+bool OSDMap::crush_rule_in_use(int rule_id) const
{
for (const auto &pool : pools) {
- if (pool.second.crush_rule == ruleset)
+ if (pool.second.crush_rule == rule_id)
return true;
}
return false;
}
+// Sanity-check a candidate crush map against every pool in this OSDMap
+// before it is committed.  For each pool's rule we require that:
+//  - the rule exists in newcrush,
+//  - the rule's mask ruleset equals the rule id (legacy invariant),
+//  - the rule's mask type matches the pool type,
+//  - the pool size falls within the rule's [min_size, max_size] mask.
+// @param newcrush candidate crush map to validate (not modified)
+// @param ss       receives a human-readable reason for the first violation
+// @return 0 on success, -EINVAL (with *ss populated) on the first failure
+int OSDMap::validate_crush_rules(CrushWrapper *newcrush,
+ ostream *ss) const
+{
+ for (auto& i : pools) {
+ auto& pool = i.second;
+ int ruleno = pool.get_crush_rule();
+ if (!newcrush->rule_exists(ruleno)) {
+ *ss << "pool " << i.first << " references crush_rule " << ruleno
+ << " but it is not present";
+ return -EINVAL;
+ }
+ if (newcrush->get_rule_mask_ruleset(ruleno) != ruleno) {
+ *ss << "rule " << ruleno << " mask ruleset does not match rule id";
+ return -EINVAL;
+ }
+ if (newcrush->get_rule_mask_type(ruleno) != (int)pool.get_type()) {
+ *ss << "pool " << i.first << " type does not match rule " << ruleno;
+ return -EINVAL;
+ }
+ if (pool.get_size() < (int)newcrush->get_rule_mask_min_size(ruleno) ||
+ pool.get_size() > (int)newcrush->get_rule_mask_max_size(ruleno)) {
+ *ss << "pool " << i.first << " size " << pool.get_size() << " does not"
+ << " fall within rule " << ruleno
+ << " min_size " << newcrush->get_rule_mask_min_size(ruleno)
+ << " and max_size " << newcrush->get_rule_mask_max_size(ruleno);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+
int OSDMap::build_simple_optioned(CephContext *cct, epoch_t e, uuid_d &fsid,
int nosd, int pg_bits, int pgp_bits,
bool default_pool)
tmp.crush->get_rule_weight_osd_map(ruleno, &pmap);
ldout(cct,30) << __func__ << " pool " << i.first << " ruleno " << ruleno << dendl;
for (auto p : pmap) {
- osd_weight[p.first] += p.second;
- osd_weight_total += p.second;
+ auto adjusted_weight = tmp.get_weightf(p.first) * p.second;
+ osd_weight[p.first] += adjusted_weight;
+ osd_weight_total += adjusted_weight;
}
}
for (auto& i : osd_weight) {
return crush->get_leaves(name, osds);
}
+// get pools whose crush rules might reference the given osd
+// @param cct      context for error logging
+// @param osd      osd id to look up in the crush map
+// @param pool_ids out: ids of in-use pools whose rules can map to this osd.
+//                 NOTE: appended to, not cleared — callers (e.g.
+//                 get_full_pools) rely on accumulating across several osds.
+void OSDMap::get_pool_ids_by_osd(CephContext *cct,
+ int osd,
+ set<int64_t> *pool_ids) const
+{
+ assert(pool_ids);
+ set<int> raw_rules;
+ int r = crush->get_rules_by_osd(osd, &raw_rules);
+ if (r < 0) {
+ lderr(cct) << __func__ << " get_rules_by_osd failed: " << cpp_strerror(r)
+ << dendl;
+ // should not happen for a valid osd id: log the errno, then fail hard
+ assert(r >= 0);
+ }
+ set<int> rules;
+ for (auto &i: raw_rules) {
+ // exclude any dead rule
+ if (crush_rule_in_use(i)) {
+ rules.insert(i);
+ }
+ }
+ // expand each surviving rule into the pool ids that reference it
+ for (auto &r: rules) {
+ get_pool_ids_by_rule(r, pool_ids);
+ }
+}
+
+
template <typename F>
class OSDUtilizationDumper : public CrushTreeDumper::Dumper<F> {
public:
{
// warn about flags
uint64_t warn_flags =
+ CEPH_OSDMAP_NEARFULL |
CEPH_OSDMAP_FULL |
CEPH_OSDMAP_PAUSERD |
CEPH_OSDMAP_PAUSEWR |
// OSD_UPGRADE_FINISHED
// none of these (yet) since we don't run until luminous upgrade is done.
- // POOL_FULL
+ // POOL_NEARFULL/BACKFILLFULL/FULL
{
- list<string> detail;
+ // one detail list per severity so each feeds its own health check below
+ list<string> full_detail, backfillfull_detail, nearfull_detail;
for (auto it : get_pools()) {
const pg_pool_t &pool = it.second;
+ const string& pool_name = get_pool_name(it.first);
if (pool.has_flag(pg_pool_t::FLAG_FULL)) {
- const string& pool_name = get_pool_name(it.first);
stringstream ss;
- ss << "pool '" << pool_name << "' is full";
- detail.push_back(ss.str());
+ if (pool.has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+ // may run out of space too,
+ // but we want EQUOTA taking precedence
+ ss << "pool '" << pool_name << "' is full (no quota)";
+ } else {
+ ss << "pool '" << pool_name << "' is full (no space)";
+ }
+ full_detail.push_back(ss.str());
+ } else if (pool.has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
+ stringstream ss;
+ ss << "pool '" << pool_name << "' is backfillfull";
+ backfillfull_detail.push_back(ss.str());
+ } else if (pool.has_flag(pg_pool_t::FLAG_NEARFULL)) {
+ stringstream ss;
+ ss << "pool '" << pool_name << "' is nearfull";
+ nearfull_detail.push_back(ss.str());
}
}
+ // raise at most one check per severity, each summarizing the pool count
+ // and carrying the per-pool messages as detail lines
- if (!detail.empty()) {
+ if (!full_detail.empty()) {
ostringstream ss;
- ss << detail.size() << " pool(s) full";
+ ss << full_detail.size() << " pool(s) full";
auto& d = checks->add("POOL_FULL", HEALTH_WARN, ss.str());
- d.detail.swap(detail);
+ d.detail.swap(full_detail);
+ }
+ if (!backfillfull_detail.empty()) {
+ ostringstream ss;
+ ss << backfillfull_detail.size() << " pool(s) backfillfull";
+ auto& d = checks->add("POOL_BACKFILLFULL", HEALTH_WARN, ss.str());
+ d.detail.swap(backfillfull_detail);
+ }
+ if (!nearfull_detail.empty()) {
+ ostringstream ss;
+ ss << nearfull_detail.size() << " pool(s) nearfull";
+ auto& d = checks->add("POOL_NEARFULL", HEALTH_WARN, ss.str());
+ d.detail.swap(nearfull_detail);
}
}
}