}
}
// delayed pg activation
- void queue_for_recovery(PG *pg, bool front = false) {
+ void queue_for_recovery(PG *pg) {
Mutex::Locker l(recovery_lock);
- if (front) {
+
+ if (pg->get_state() & (PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL)) {
awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
} else {
awaiting_throttle.push_back(make_pair(pg->get_osdmap()->get_epoch(), pg));
_queue_for_recovery(make_pair(queued, pg), reserved_pushes);
}
+ void adjust_pg_priorities(vector<PG*> pgs, int newflags);
// osd map cache (past osd maps)
Mutex map_cache_lock;
void handle_pg_backfill_reserve(OpRequestRef op);
void handle_pg_recovery_reserve(OpRequestRef op);
+ void handle_force_recovery(Message *m);
+
void handle_pg_remove(OpRequestRef op);
void _remove_pg(PG *pg);
case MSG_OSD_REP_SCRUBMAP:
case MSG_OSD_PG_UPDATE_LOG_MISSING:
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
// Recovery-delete messages are dispatched the same way as the other
// PG-targeted operations above.
case MSG_OSD_PG_RECOVERY_DELETE:
case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
  return true;
default:
  return false;
int get_num_op_shards();
int get_num_op_threads();
+ float get_osd_recovery_sleep();
+
public:
static int peek_meta(ObjectStore *store, string& magic,
uuid_d& cluster_fsid, uuid_d& osd_fsid, int& whoami);