l_osd_pg_primary,
l_osd_pg_replica,
l_osd_pg_stray,
+ l_osd_pg_removing,
l_osd_hb_to,
l_osd_map,
l_osd_mape,
// -- pg_temp --
private:
Mutex pg_temp_lock;
- map<pg_t, vector<int> > pg_temp_wanted;
- map<pg_t, vector<int> > pg_temp_pending;
+ // A queued pg_temp request for one PG: the acting set we want the
+ // monitor to install, plus a 'forced' flag carried through from
+ // queue_want_pg_temp().  NOTE(review): the exact semantics of
+ // 'forced' are not visible in this hunk — confirm against the
+ // send_pg_temp()/requeue_pg_temp() implementations.
+ struct pg_temp_t {
+ // Default-constructed entry: empty acting set, not forced.
+ pg_temp_t()
+ {}
+ pg_temp_t(vector<int> v, bool f)
+ : acting{v}, forced{f}
+ {}
+ vector<int> acting;   // desired acting set for this PG
+ bool forced = false;  // explicitly forced request (see ctor above)
+ };
+ map<pg_t, pg_temp_t> pg_temp_wanted;
+ map<pg_t, pg_temp_t> pg_temp_pending;
void _sent_pg_temp();
+ friend std::ostream& operator<<(std::ostream&, const pg_temp_t&);
public:
- void queue_want_pg_temp(pg_t pgid, vector<int>& want);
+ void queue_want_pg_temp(pg_t pgid, const vector<int>& want,
+ bool forced = false);
void remove_want_pg_temp(pg_t pgid);
void requeue_pg_temp();
void send_pg_temp();
_queue_for_recovery(make_pair(queued, pg), reserved_pushes);
}
- void adjust_pg_priorities(vector<PG*> pgs, int newflags);
+ void adjust_pg_priorities(const vector<PGRef>& pgs, int newflags);
// osd map cache (past osd maps)
Mutex map_cache_lock;
void update_osd_stat(vector<int>& hb_peers);
osd_stat_t set_osd_stat(const struct store_statfs_t &stbuf,
- vector<int>& hb_peers);
+ vector<int>& hb_peers,
+ int num_pgs);
osd_stat_t get_osd_stat() {
Mutex::Locker l(stat_lock);
++seq;
return ret;
}
+ void request_osdmap_update(epoch_t e);
+
// -- stopping --
Mutex is_stopping_lock;
Cond is_stopping_cond;
std::string dev_path, journal_path;
bool store_is_rotational = true;
+ bool journal_is_rotational = true;
ZTracer::Endpoint trace_endpoint;
void create_logger();
void osdmap_subscribe(version_t epoch, bool force_request);
/** @} monc helpers */
+ Mutex osdmap_subscribe_lock;
+ epoch_t latest_subscribed_epoch{0};
+
// -- heartbeat --
/// information about a heartbeat peer
struct HeartbeatInfo {
RWLock pg_map_lock; // this lock orders *above* individual PG _locks
ceph::unordered_map<spg_t, PG*> pg_map; // protected by pg_map lock
+ std::mutex pending_creates_lock;
+ using create_from_osd_t = std::pair<pg_t, bool /* is primary*/>;
+ std::set<create_from_osd_t> pending_creates_from_osd;
+ unsigned pending_creates_from_mon = 0;
+
map<spg_t, list<PG::CephPeeringEvtRef> > peering_wait_for_split;
PGRecoveryStats pg_recovery_stats;
public:
PG *lookup_lock_pg(spg_t pgid);
+ // Number of PGs currently registered in pg_map.  Takes
+ // pg_map_lock for reading, so it is safe to call concurrently
+ // with PG registration/removal; the result may be stale by the
+ // time the caller uses it.  (pg_map.size() is narrowed to int.)
+ int get_num_pgs() {
+ RWLock::RLocker l(pg_map_lock);
+ return pg_map.size();
+ }
+
protected:
PG *_open_lock_pg(OSDMapRef createmap,
spg_t pg, bool no_lockdep_check=false);
const PastIntervals& pi,
epoch_t epoch,
PG::CephPeeringEvtRef evt);
-
+ bool maybe_wait_for_max_pg(spg_t pgid, bool is_mon_create);
+ void resume_creating_pg();
+
void load_pgs();
void build_past_intervals_parallel();
void _clear() override {
remove_queue.clear();
}
+ // Current length of remove_queue, sampled under the work-queue
+ // lock(); the returned count is only a snapshot and may change
+ // immediately after unlock().  (size() is narrowed to int.)
+ int get_remove_queue_len() {
+ lock();
+ int r = remove_queue.size();
+ unlock();
+ return r;
+ }
} remove_wq;
+ // -- status reporting --
+ MPGStats *collect_pg_stats();
+ std::vector<OSDHealthMetric> get_health_metrics();
+
private:
bool ms_can_fast_dispatch_any() const override { return true; }
bool ms_can_fast_dispatch(const Message *m) const override {
srand(time(NULL));
unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0]));
return index_lookup[which];
- } else if (cct->_conf->osd_op_queue == "wpq") {
- return io_queue::weightedpriority;
+ } else if (cct->_conf->osd_op_queue == "prioritized") {
+ return io_queue::prioritized;
} else if (cct->_conf->osd_op_queue == "mclock_opclass") {
return io_queue::mclock_opclass;
} else if (cct->_conf->osd_op_queue == "mclock_client") {
return io_queue::mclock_client;
} else {
- return io_queue::prioritized;
+ // default / catch-all is 'wpq'
+ return io_queue::weightedpriority;
}
}
if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
srand(time(NULL));
return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
- } else if (cct->_conf->osd_op_queue_cut_off == "low") {
- return CEPH_MSG_PRIO_LOW;
- } else {
+ } else if (cct->_conf->osd_op_queue_cut_off == "high") {
return CEPH_MSG_PRIO_HIGH;
+ } else {
+ // default / catch-all is 'low'
+ return CEPH_MSG_PRIO_LOW;
}
}
int update_crush_device_class();
int update_crush_location();
- static int write_meta(ObjectStore *store,
+ static int write_meta(CephContext *cct,
+ ObjectStore *store,
uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami);
void handle_pg_scrub(struct MOSDScrub *m, PG* pg);