import quincy beta 17.1.0
ceph/src/osd/scheduler/OpSchedulerItem.h
index afa363e19e6895f7311819c5a2d305d3321b5499..68ee0d734c228416102e6a77305a6be8f6c26510 100644
@@ -36,6 +36,8 @@ enum class op_scheduler_class : uint8_t {
   client,
 };
 
+std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id);
+
 class OpSchedulerItem {
 public:
   class OrderLocker {
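The stream operator for op_scheduler_class is only declared in this header; its definition lives elsewhere in the scheduler sources. A minimal sketch of what such a definition could look like, covering only the two enumerators visible in this diff (client and immediate) and falling back to the numeric value otherwise:

    std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id) {
      switch (class_id) {
      case op_scheduler_class::client:    return out << "client";
      case op_scheduler_class::immediate: return out << "immediate";
      default:
        // remaining scheduler classes are not shown in this hunk
        return out << static_cast<unsigned>(class_id);
      }
    }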
@@ -104,6 +106,8 @@ private:
   utime_t start_time;
   uint64_t owner;  ///< global id (e.g., client.XXX)
   epoch_t map_epoch;    ///< an epoch we expect the PG to exist in
+  int qos_cost;  ///< scaled cost calculated by the mclock scheduler
+  bool qos_item;  ///< set to true if item is scheduled by mclock scheduler
 
 public:
   OpSchedulerItem(
@@ -119,7 +123,7 @@ public:
       start_time(start_time),
       owner(owner),
       map_epoch(e)
-  {}
+  { qos_cost = 0; qos_item = false; }
   OpSchedulerItem(OpSchedulerItem &&) = default;
   OpSchedulerItem(const OpSchedulerItem &) = delete;
   OpSchedulerItem &operator=(OpSchedulerItem &&) = default;
@@ -169,15 +173,45 @@ public:
     return qitem->get_scheduler_class();
   }
 
+  void maybe_set_is_qos_item() {
+    if (get_scheduler_class() != op_scheduler_class::immediate) {
+      qos_item = true;
+    }
+  }
+
+  bool is_qos_item() const {
+    return qos_item;
+  }
+
+  void set_qos_cost(int scaled_cost) {
+    qos_cost = scaled_cost;
+  }
+
+  int get_qos_cost() const {
+    return qos_cost;
+  }
+
   friend std::ostream& operator<<(std::ostream& out, const OpSchedulerItem& item) {
-     out << "OpSchedulerItem("
-        << item.get_ordering_token() << " " << *item.qitem
-        << " prio " << item.get_priority()
-        << " cost " << item.get_cost()
-        << " e" << item.get_map_epoch();
-     if (item.get_reserved_pushes()) {
-       out << " reserved_pushes " << item.get_reserved_pushes();
-     }
+    out << "OpSchedulerItem("
+        << item.get_ordering_token() << " " << *item.qitem;
+
+    if (item.is_qos_item()) {
+      out << " class_id " << item.get_scheduler_class();
+    } else {
+      out << " prio " << item.get_priority();
+    }
+
+    if (item.get_qos_cost()) {
+      out << " qos_cost " << item.get_qos_cost();
+    }
+
+    out << " cost " << item.get_cost()
+        << " e" << item.get_map_epoch();
+
+    if (item.get_reserved_pushes()) {
+      out << " reserved_pushes " << item.get_reserved_pushes();
+    }
+
     return out << ")";
   }
 }; // class OpSchedulerItem
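Taken together, the new accessors suggest the intended call pattern on the scheduler side: mark an item as qos-managed unless it is in the immediate class, then record the scaled mclock cost before the item is queued. A minimal sketch, assuming a hypothetical scale_cost() helper inside an mclock-based scheduler:

    // Sketch of an enqueue path in an mclock-style scheduler; scale_cost()
    // and the surrounding class are assumptions, not part of this header.
    void enqueue(OpSchedulerItem&& item) {
      item.maybe_set_is_qos_item();     // immediate items are left untagged
      if (item.is_qos_item()) {
        item.set_qos_cost(scale_cost(item.get_cost()));
      }
      // ... hand the item to the underlying dmclock queue ...
    }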
@@ -328,15 +362,29 @@ public:
 class PGScrubItem : public PGOpQueueable {
  protected:
   epoch_t epoch_queued;
+  Scrub::act_token_t activation_index;  ///< scrub activation token identifying the scrub session this event was queued for
   std::string_view message_name;
   PGScrubItem(spg_t pg, epoch_t epoch_queued, std::string_view derivative_name)
-      : PGOpQueueable{pg}, epoch_queued{epoch_queued}, message_name{derivative_name}
+      : PGOpQueueable{pg}
+      , epoch_queued{epoch_queued}
+      , activation_index{0}
+      , message_name{derivative_name}
+  {}
+  PGScrubItem(spg_t pg,
+             epoch_t epoch_queued,
+             Scrub::act_token_t op_index,
+             std::string_view derivative_name)
+      : PGOpQueueable{pg}
+      , epoch_queued{epoch_queued}
+      , activation_index{op_index}
+      , message_name{derivative_name}
   {}
   op_type_t get_op_type() const final { return op_type_t::bg_scrub; }
   std::ostream& print(std::ostream& rhs) const final
   {
     return rhs << message_name << "(pgid=" << get_pgid()
-              << "epoch_queued=" << epoch_queued << ")";
+              << "epoch_queued=" << epoch_queued
+              << " scrub-token=" << activation_index << ")";
   }
   void run(OSD* osd,
           OSDShard* sdata,
@@ -428,6 +476,14 @@ class PGScrubDigestUpdate : public PGScrubItem {
   void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
 };
 
+class PGScrubGotLocalMap : public PGScrubItem {
+ public:
+  PGScrubGotLocalMap(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubGotLocalMap"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
 class PGScrubGotReplMaps : public PGScrubItem {
  public:
   PGScrubGotReplMaps(spg_t pg, epoch_t epoch_queued)
@@ -436,17 +492,26 @@ class PGScrubGotReplMaps : public PGScrubItem {
   void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
 };
 
+class PGScrubMapsCompared : public PGScrubItem {
+ public:
+  PGScrubMapsCompared(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubMapsCompared"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
 class PGRepScrub : public PGScrubItem {
  public:
-  PGRepScrub(spg_t pg, epoch_t epoch_queued) : PGScrubItem{pg, epoch_queued, "PGRepScrub"}
+  PGRepScrub(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
+      : PGScrubItem{pg, epoch_queued, op_token, "PGRepScrub"}
   {}
   void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
 };
 
 class PGRepScrubResched : public PGScrubItem {
  public:
-  PGRepScrubResched(spg_t pg, epoch_t epoch_queued)
-      : PGScrubItem{pg, epoch_queued, "PGRepScrubResched"}
+  PGRepScrubResched(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
+      : PGScrubItem{pg, epoch_queued, op_token, "PGRepScrubResched"}
   {}
   void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
 };
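With the extra constructor argument, replica-side scrub events are expected to carry the activation token of the scrub session that triggered them, so stale requests can be told apart from current ones. A sketch of building such a queue item; the variable names and the trailing OpSchedulerItem arguments (cost, priority, owner) are placeholders, not taken from this diff:

    // Sketch only: a replica scrub tagged with its activation token.
    OpSchedulerItem item(
      std::make_unique<PGRepScrub>(pgid, epoch, token),  // token: Scrub::act_token_t
      cost, priority, ceph_clock_now(), owner, epoch);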
@@ -459,6 +524,38 @@ class PGScrubReplicaPushes : public PGScrubItem {
   void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
 };
 
+class PGScrubScrubFinished : public PGScrubItem {
+ public:
+  PGScrubScrubFinished(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubScrubFinished"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
+class PGScrubGetNextChunk : public PGScrubItem {
+ public:
+  PGScrubGetNextChunk(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubGetNextChunk"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
+class PGScrubChunkIsBusy : public PGScrubItem {
+ public:
+  PGScrubChunkIsBusy(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubChunkIsBusy"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
+class PGScrubChunkIsFree : public PGScrubItem {
+ public:
+  PGScrubChunkIsFree(spg_t pg, epoch_t epoch_queued)
+    : PGScrubItem{pg, epoch_queued, "PGScrubChunkIsFree"}
+  {}
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
+};
+
 class PGRecovery : public PGOpQueueable {
   epoch_t epoch_queued;
   uint64_t reserved_pushes;
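Each of the new scrub event classes is a thin queue wrapper whose run() override (defined in the corresponding source file) forwards the event, together with the queued epoch, to the PG's scrubber. A sketch for one of them; the scrubber entry-point name is an assumption made only to show the forwarding pattern:

    // Hypothetical definition; scrub_send_scrub_is_finished() is an assumed
    // PG entry point, not confirmed by this header.
    void PGScrubScrubFinished::run(OSD* osd, OSDShard* sdata,
                                   PGRef& pg, ThreadPool::TPHandle& handle)
    {
      pg->scrub_send_scrub_is_finished(epoch_queued, handle);
    }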