}
-void OSDService::queue_want_pg_temp(pg_t pgid, vector<int>& want)
+void OSDService::queue_want_pg_temp(pg_t pgid,
+ const vector<int>& want,
+ bool forced)
{
Mutex::Locker l(pg_temp_lock);
- map<pg_t,vector<int> >::iterator p = pg_temp_pending.find(pgid);
+ auto p = pg_temp_pending.find(pgid);
if (p == pg_temp_pending.end() ||
- p->second != want) {
- pg_temp_wanted[pgid] = want;
+ p->second.acting != want ||
+ forced) {
+ pg_temp_wanted[pgid] = pg_temp_t{want, forced};
}
}
void OSDService::_sent_pg_temp()
{
- for (map<pg_t,vector<int> >::iterator p = pg_temp_wanted.begin();
- p != pg_temp_wanted.end();
- ++p)
- pg_temp_pending[p->first] = p->second;
+ pg_temp_pending.insert(make_move_iterator(begin(pg_temp_wanted)),
+ make_move_iterator(end(pg_temp_wanted)));
pg_temp_wanted.clear();
}
<< pg_temp_wanted.size() << dendl;
}
+std::ostream& operator<<(std::ostream& out,
+ const OSDService::pg_temp_t& pg_temp)
+{
+ out << pg_temp.acting;
+ if (pg_temp.forced) {
+ out << " (forced)";
+ }
+ return out;
+}
+
void OSDService::send_pg_temp()
{
Mutex::Locker l(pg_temp_lock);
if (pg_temp_wanted.empty())
return;
dout(10) << "send_pg_temp " << pg_temp_wanted << dendl;
- MOSDPGTemp *m = new MOSDPGTemp(osdmap->get_epoch());
- m->pg_temp = pg_temp_wanted;
- monc->send_mon_message(m);
+ MOSDPGTemp *ms[2] = {nullptr, nullptr};
+ for (auto& pg_temp : pg_temp_wanted) {
+ auto& m = ms[pg_temp.second.forced];
+ if (!m) {
+ m = new MOSDPGTemp(osdmap->get_epoch());
+ m->forced = pg_temp.second.forced;
+ }
+ m->pg_temp.emplace(pg_temp.first,
+ pg_temp.second.acting);
+ }
+ for (auto m : ms) {
+ if (m) {
+ monc->send_mon_message(m);
+ }
+ }
_sent_pg_temp();
}
if (scrubs_pending + scrubs_active < cct->_conf->osd_max_scrubs) {
dout(20) << __func__ << " " << scrubs_pending << " -> " << (scrubs_pending+1)
- << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
+ << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active
+ << ")" << dendl;
can_inc = true;
} else {
- dout(20) << __func__ << scrubs_pending << " + " << scrubs_active << " active >= max " << cct->_conf->osd_max_scrubs << dendl;
+ dout(20) << __func__ << " " << scrubs_pending << " + " << scrubs_active
+ << " active >= max " << cct->_conf->osd_max_scrubs << dendl;
}
return can_inc;
MOSDMap *OSDService::build_incremental_map_msg(epoch_t since, epoch_t to,
OSDSuperblock& sblock)
{
- MOSDMap *m = new MOSDMap(monc->get_fsid());
+ MOSDMap *m = new MOSDMap(monc->get_fsid(),
+ osdmap->get_encoding_features());
m->oldest_map = max_oldest_map;
m->newest_map = sblock.newest_map;
OSDSuperblock sblock(get_superblock());
if (since < sblock.oldest_map) {
// just send latest full map
- MOSDMap *m = new MOSDMap(monc->get_fsid());
+ MOSDMap *m = new MOSDMap(monc->get_fsid(),
+ osdmap->get_encoding_features());
m->oldest_map = max_oldest_map;
m->newest_map = sblock.newest_map;
get_map_bl(to, m->maps[to]);
return r;
string key = cct->_conf->get_val<string>("key");
- lderr(cct) << "key " << key << dendl;
if (key.size()) {
r = store->write_meta("osd_key", key);
if (r < 0)
return r;
+ } else {
+ string keyfile = cct->_conf->get_val<string>("keyfile");
+ if (!keyfile.empty()) {
+ bufferlist keybl;
+ string err;
+ if (keyfile == "-") {
+ static_assert(1024 * 1024 >
+ (sizeof(CryptoKey) - sizeof(bufferptr) +
+ sizeof(__u16) + 16 /* AES_KEY_LEN */ + 3 - 1) / 3. * 4.,
+ "1MB should be enough for a base64 encoded CryptoKey");
+ r = keybl.read_fd(STDIN_FILENO, 1024 * 1024);
+ } else {
+ r = keybl.read_file(keyfile.c_str(), &err);
+ }
+ if (r < 0) {
+ derr << __func__ << " failed to read keyfile " << keyfile << ": "
+ << err << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ r = store->write_meta("osd_key", keybl.to_str());
+ if (r < 0)
+ return r;
+ }
}
r = store->write_meta("ready", "ready");
update_log_config();
peering_tp.start();
+
+ service.init();
+ service.publish_map(osdmap);
+ service.publish_superblock(superblock);
+ service.max_oldest_map = superblock.oldest_map;
+
osd_op_tp.start();
disk_tp.start();
command_tp.start();
tick_timer_without_osd_lock.add_event_after(cct->_conf->osd_heartbeat_interval, new C_Tick_WithoutOSDLock(this));
}
- service.init();
- service.publish_map(osdmap);
- service.publish_superblock(superblock);
- service.max_oldest_map = superblock.oldest_map;
-
osd_lock.Unlock();
r = monc->authenticate();
osd_plb.add_u64(
l_osd_pg_stray, "numpg_stray",
"Placement groups ready to be deleted from this osd");
+ osd_plb.add_u64(
+ l_osd_pg_removing, "numpg_removing",
+ "Placement groups queued for local deletion", "pgsr",
+ PerfCountersBuilder::PRIO_USEFUL);
osd_plb.add_u64(
l_osd_hb_to, "heartbeat_to_peers", "Heartbeat (ping) peers we send to");
osd_plb.add_u64_counter(l_osd_map, "map_messages", "OSD map messages");
pg->upgrade(store);
}
+ if (pg->dne()) {
+ dout(10) << "load_pgs " << *it << " deleting dne" << dendl;
+ pg->ch = nullptr;
+ service.pg_remove_epoch(pg->pg_id);
+ pg->unlock();
+ {
+ // Delete pg
+ RWLock::WLocker l(pg_map_lock);
+ auto p = pg_map.find(pg->get_pgid());
+ assert(p != pg_map.end() && p->second == pg);
+ dout(20) << __func__ << " removed pg " << pg << " from pg_map" << dendl;
+ pg_map.erase(p);
+ pg->put("PGMap");
+ }
+ recursive_remove_collection(cct, store, pgid, *it);
+ continue;
+ }
+
service.init_splits_between(pg->info.pgid, pg->get_osdmap(), osdmap);
// generate state for PG's current mapping
if (is_mon_create) {
pending_creates_from_mon++;
} else {
- pending_creates_from_osd.emplace(pgid.pgid);
+ bool is_primary = osdmap->get_pg_acting_rank(pgid.pgid, whoami) == 0;
+ pending_creates_from_osd.emplace(pgid.pgid, is_primary);
}
dout(5) << __func__ << " withhold creation of pg " << pgid
<< ": " << pg_map.size() << " >= "<< max_pgs_per_osd << dendl;
void OSD::resume_creating_pg()
{
bool do_sub_pg_creates = false;
- MOSDPGTemp *pgtemp = nullptr;
+ bool have_pending_creates = false;
{
const auto max_pgs_per_osd =
(cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
}
auto pg = pending_creates_from_osd.cbegin();
while (spare_pgs > 0 && pg != pending_creates_from_osd.cend()) {
- if (!pgtemp) {
- pgtemp = new MOSDPGTemp{osdmap->get_epoch()};
- }
+ dout(20) << __func__ << " pg " << pg->first << dendl;
vector<int> acting;
- osdmap->pg_to_up_acting_osds(*pg, nullptr, nullptr, &acting, nullptr);
- pgtemp->pg_temp[*pg] = twiddle(acting);
+ osdmap->pg_to_up_acting_osds(pg->first, nullptr, nullptr, &acting, nullptr);
+ service.queue_want_pg_temp(pg->first, twiddle(acting), true);
pg = pending_creates_from_osd.erase(pg);
+ do_sub_pg_creates = true;
spare_pgs--;
}
+ have_pending_creates = (pending_creates_from_mon > 0 ||
+ !pending_creates_from_osd.empty());
}
+
+ bool do_renew_subs = false;
if (do_sub_pg_creates) {
if (monc->sub_want("osd_pg_creates", last_pg_create_epoch, 0)) {
dout(4) << __func__ << ": resolicit pg creates from mon since "
<< last_pg_create_epoch << dendl;
- monc->renew_subs();
+ do_renew_subs = true;
+ }
+ }
+ version_t start = osdmap->get_epoch() + 1;
+ if (have_pending_creates) {
+    // keep the continuous osdmap subscription so we don't miss any new
+    // osdmap that deletes PGs we are still waiting to create
+ if (monc->sub_want("osdmap", start, 0)) {
+ dout(4) << __func__ << ": resolicit osdmap from mon since "
+ << start << dendl;
+ do_renew_subs = true;
+ }
+ } else if (do_sub_pg_creates) {
+ // no need to subscribe the osdmap continuously anymore
+ // once the pgtemp and/or mon_subscribe(pg_creates) is sent
+ if (monc->sub_want_increment("osdmap", start, CEPH_SUBSCRIBE_ONETIME)) {
+      dout(4) << __func__ << ": re-subscribe osdmap(onetime) since "
+ << start << dendl;
+ do_renew_subs = true;
}
}
- if (pgtemp) {
- pgtemp->forced = true;
- monc->send_mon_message(pgtemp);
+
+ if (do_renew_subs) {
+ monc->renew_subs();
}
+
+ service.send_pg_temp();
}
void OSD::build_initial_pg_history(
logger->set(l_osd_cached_crc, buffer::get_cached_crc());
logger->set(l_osd_cached_crc_adjusted, buffer::get_cached_crc_adjusted());
logger->set(l_osd_missed_crc, buffer::get_missed_crc());
+ logger->set(l_osd_pg_removing, remove_wq.get_remove_queue_len());
// osd_lock is not being held, which means the OSD state
// might change when doing the monitor report
}
}
- check_ops_in_flight();
+ mgrc.update_osd_health(get_health_metrics());
service.kick_recovery_queue();
tick_timer_without_osd_lock.add_event_after(OSD_TICK_INTERVAL, new C_Tick_WithoutOSDLock(this));
}
if (osdmap->get_epoch() == 0) {
derr << "waiting for initial osdmap" << dendl;
} else if (osdmap->is_destroyed(whoami)) {
- derr << "osdmap says I am destroyed, exiting" << dendl;
- exit(0);
+ derr << "osdmap says I am destroyed" << dendl;
+ // provide a small margin so we don't livelock seeing if we
+ // un-destroyed ourselves.
+ if (osdmap->get_epoch() > newest - 1) {
+ exit(0);
+ }
} else if (osdmap->test_flag(CEPH_OSDMAP_NOUP) || osdmap->is_noup(whoami)) {
derr << "osdmap NOUP flag is set, waiting for it to clear" << dendl;
} else if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
}
-bool OSD::ms_verify_authorizer(Connection *con, int peer_type,
- int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
- bool& isvalid, CryptoKey& session_key)
+bool OSD::ms_verify_authorizer(
+ Connection *con, int peer_type,
+ int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
+ bool& isvalid, CryptoKey& session_key,
+ std::unique_ptr<AuthAuthorizerChallenge> *challenge)
{
AuthAuthorizeHandler *authorize_handler = 0;
switch (peer_type) {
isvalid = authorize_handler->verify_authorizer(
cct, keys,
authorizer_data, authorizer_reply, name, global_id, caps_info, session_key,
- &auid);
+ &auid, challenge);
} else {
dout(10) << __func__ << " no rotating_keys (yet), denied" << dendl;
isvalid = false;
struct tm bdt;
time_t tt = now.sec();
localtime_r(&tt, &bdt);
+
+ bool day_permit = false;
+ if (cct->_conf->osd_scrub_begin_week_day < cct->_conf->osd_scrub_end_week_day) {
+ if (bdt.tm_wday >= cct->_conf->osd_scrub_begin_week_day && bdt.tm_wday < cct->_conf->osd_scrub_end_week_day) {
+ day_permit = true;
+ }
+ } else {
+ if (bdt.tm_wday >= cct->_conf->osd_scrub_begin_week_day || bdt.tm_wday < cct->_conf->osd_scrub_end_week_day) {
+ day_permit = true;
+ }
+ }
+
+ if (!day_permit) {
+ dout(20) << __func__ << " should run between week day " << cct->_conf->osd_scrub_begin_week_day
+ << " - " << cct->_conf->osd_scrub_end_week_day
+ << " now " << bdt.tm_wday << " = no" << dendl;
+ return false;
+ }
+
bool time_permit = false;
if (cct->_conf->osd_scrub_begin_hour < cct->_conf->osd_scrub_end_hour) {
if (bdt.tm_hour >= cct->_conf->osd_scrub_begin_hour && bdt.tm_hour < cct->_conf->osd_scrub_end_hour) {
+vector<OSDHealthMetric> OSD::get_health_metrics()
+{
+ vector<OSDHealthMetric> metrics;
+ lock_guard<mutex> pending_creates_locker{pending_creates_lock};
+ auto n_primaries = pending_creates_from_mon;
+ for (const auto& create : pending_creates_from_osd) {
+ if (create.second) {
+ n_primaries++;
+ }
+ }
+ metrics.emplace_back(osd_metric::PENDING_CREATING_PGS, n_primaries);
+ return metrics;
+}
+
// =====================================================
// MAP
lock_guard<mutex> pending_creates_locker{pending_creates_lock};
for (auto pg = pending_creates_from_osd.cbegin();
pg != pending_creates_from_osd.cend();) {
- if (osdmap->get_pg_acting_rank(*pg, whoami) < 0) {
+ if (osdmap->get_pg_acting_rank(pg->first, whoami) < 0) {
pg = pending_creates_from_osd.erase(pg);
} else {
++pg;
logger->set(l_osd_pg_primary, num_pg_primary);
logger->set(l_osd_pg_replica, num_pg_replica);
logger->set(l_osd_pg_stray, num_pg_stray);
+ logger->set(l_osd_pg_removing, remove_wq.get_remove_queue_len());
}
void OSD::activate_map()
* queue_recovery_after_sleep.
*/
float recovery_sleep = get_osd_recovery_sleep();
- if (recovery_sleep > 0 && service.recovery_needs_sleep) {
- PGRef pgref(pg);
- auto recovery_requeue_callback = new FunctionContext([this, pgref, queued, reserved_pushes](int r) {
- dout(20) << "do_recovery wake up at "
- << ceph_clock_now()
- << ", re-queuing recovery" << dendl;
- service.recovery_needs_sleep = false;
- service.queue_recovery_after_sleep(pgref.get(), queued, reserved_pushes);
- });
+ {
Mutex::Locker l(service.recovery_sleep_lock);
-
- // This is true for the first recovery op and when the previous recovery op
- // has been scheduled in the past. The next recovery op is scheduled after
- // completing the sleep from now.
- if (service.recovery_schedule_time < ceph_clock_now()) {
- service.recovery_schedule_time = ceph_clock_now();
- }
- service.recovery_schedule_time += recovery_sleep;
- service.recovery_sleep_timer.add_event_at(service.recovery_schedule_time,
- recovery_requeue_callback);
- dout(20) << "Recovery event scheduled at "
- << service.recovery_schedule_time << dendl;
- return;
+ if (recovery_sleep > 0 && service.recovery_needs_sleep) {
+ PGRef pgref(pg);
+ auto recovery_requeue_callback = new FunctionContext([this, pgref, queued, reserved_pushes](int r) {
+ dout(20) << "do_recovery wake up at "
+ << ceph_clock_now()
+ << ", re-queuing recovery" << dendl;
+ Mutex::Locker l(service.recovery_sleep_lock);
+ service.recovery_needs_sleep = false;
+ service.queue_recovery_after_sleep(pgref.get(), queued, reserved_pushes);
+ });
+
+ // This is true for the first recovery op and when the previous recovery op
+ // has been scheduled in the past. The next recovery op is scheduled after
+ // completing the sleep from now.
+ if (service.recovery_schedule_time < ceph_clock_now()) {
+ service.recovery_schedule_time = ceph_clock_now();
+ }
+ service.recovery_schedule_time += recovery_sleep;
+ service.recovery_sleep_timer.add_event_at(service.recovery_schedule_time,
+ recovery_requeue_callback);
+ dout(20) << "Recovery event scheduled at "
+ << service.recovery_schedule_time << dendl;
+ return;
+ }
}
{
- service.recovery_needs_sleep = true;
+ {
+ Mutex::Locker l(service.recovery_sleep_lock);
+ service.recovery_needs_sleep = true;
+ }
+
if (pg->pg_has_reset_since(queued)) {
goto out;
}
uint32_t shard_index = pgid.hash_to_shard(shard_list.size());
auto sdata = shard_list[shard_index];
bool queued = false;
- unsigned pushes_to_free = 0;
{
Mutex::Locker l(sdata->sdata_op_ordering_lock);
auto p = sdata->pg_slots.find(pgid);
++i) {
sdata->_enqueue_front(make_pair(pgid, *i), osd->op_prio_cutoff);
}
- for (auto& q : p->second.to_process) {
- pushes_to_free += q.get_reserved_pushes();
- }
p->second.to_process.clear();
p->second.waiting_for_pg = false;
++p->second.requeue_seq;
queued = true;
}
}
- if (pushes_to_free > 0) {
- osd->service.release_reserved_pushes(pushes_to_free);
- }
if (queued) {
sdata->sdata_lock.Lock();
sdata->sdata_cond.SignalOne();