// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */
#include "osd/scheduler/OpSchedulerItem.h"

#include "osd/OSD.h"
#include "osd/osd_tracer.h"
namespace ceph::osd::scheduler {
22 std::ostream
& operator<<(std::ostream
& out
, const op_scheduler_class
& class_id
) {
23 out
<< static_cast<size_t>(class_id
);
31 ThreadPool::TPHandle
&handle
)
33 osd
->dequeue_op(pg
, op
, handle
);
37 void PGPeeringItem::run(
41 ThreadPool::TPHandle
&handle
)
43 osd
->dequeue_peering_evt(sdata
, pg
.get(), evt
, handle
);
50 ThreadPool::TPHandle
&handle
)
52 pg
->snap_trimmer(epoch_queued
);
56 void PGScrub::run(OSD
* osd
, OSDShard
* sdata
, PGRef
& pg
, ThreadPool::TPHandle
& handle
)
58 pg
->scrub(epoch_queued
, handle
);
62 void PGScrubAfterRepair::run(OSD
* osd
,
65 ThreadPool::TPHandle
& handle
)
67 pg
->recovery_scrub(epoch_queued
, handle
);
71 void PGScrubResched::run(OSD
* osd
,
74 ThreadPool::TPHandle
& handle
)
76 pg
->scrub_send_scrub_resched(epoch_queued
, handle
);
80 void PGScrubResourcesOK::run(OSD
* osd
,
83 ThreadPool::TPHandle
& handle
)
85 pg
->scrub_send_resources_granted(epoch_queued
, handle
);
89 void PGScrubDenied::run(OSD
* osd
,
92 ThreadPool::TPHandle
& handle
)
94 pg
->scrub_send_resources_denied(epoch_queued
, handle
);
98 void PGScrubPushesUpdate::run(OSD
* osd
,
101 ThreadPool::TPHandle
& handle
)
103 pg
->scrub_send_pushes_update(epoch_queued
, handle
);
107 void PGScrubAppliedUpdate::run(OSD
* osd
,
110 ThreadPool::TPHandle
& handle
)
112 pg
->scrub_send_applied_update(epoch_queued
, handle
);
116 void PGScrubUnblocked::run(OSD
* osd
,
119 ThreadPool::TPHandle
& handle
)
121 pg
->scrub_send_unblocking(epoch_queued
, handle
);
125 void PGScrubDigestUpdate::run(OSD
* osd
,
128 ThreadPool::TPHandle
& handle
)
130 pg
->scrub_send_digest_update(epoch_queued
, handle
);
134 void PGScrubGotLocalMap::run(OSD
* osd
,
137 ThreadPool::TPHandle
& handle
)
139 pg
->scrub_send_local_map_ready(epoch_queued
, handle
);
143 void PGScrubGotReplMaps::run(OSD
* osd
,
146 ThreadPool::TPHandle
& handle
)
148 pg
->scrub_send_replmaps_ready(epoch_queued
, handle
);
152 void PGRepScrub::run(OSD
* osd
, OSDShard
* sdata
, PGRef
& pg
, ThreadPool::TPHandle
& handle
)
154 pg
->replica_scrub(epoch_queued
, activation_index
, handle
);
158 void PGRepScrubResched::run(OSD
* osd
,
161 ThreadPool::TPHandle
& handle
)
163 pg
->replica_scrub_resched(epoch_queued
, activation_index
, handle
);
167 void PGScrubReplicaPushes::run([[maybe_unused
]] OSD
* osd
,
170 ThreadPool::TPHandle
& handle
)
172 pg
->scrub_send_replica_pushes(epoch_queued
, handle
);
176 void PGScrubScrubFinished::run([[maybe_unused
]] OSD
* osd
,
179 ThreadPool::TPHandle
& handle
)
181 pg
->scrub_send_scrub_is_finished(epoch_queued
, handle
);
185 void PGScrubGetNextChunk::run([[maybe_unused
]] OSD
* osd
,
188 ThreadPool::TPHandle
& handle
)
190 pg
->scrub_send_get_next_chunk(epoch_queued
, handle
);
194 void PGScrubChunkIsBusy::run([[maybe_unused
]] OSD
* osd
,
197 ThreadPool::TPHandle
& handle
)
199 pg
->scrub_send_chunk_busy(epoch_queued
, handle
);
203 void PGScrubChunkIsFree::run([[maybe_unused
]] OSD
* osd
,
206 ThreadPool::TPHandle
& handle
)
208 pg
->scrub_send_chunk_free(epoch_queued
, handle
);
212 void PGRecovery::run(
216 ThreadPool::TPHandle
&handle
)
219 l_osd_recovery_queue_lat
,
220 time_queued
- ceph_clock_now());
221 osd
->do_recovery(pg
.get(), epoch_queued
, reserved_pushes
, priority
, handle
);
225 void PGRecoveryContext::run(
229 ThreadPool::TPHandle
&handle
)
232 l_osd_recovery_context_queue_lat
,
233 time_queued
- ceph_clock_now());
234 c
.release()->complete(handle
);
242 ThreadPool::TPHandle
&handle
)
244 osd
->dequeue_delete(sdata
, pg
.get(), epoch_queued
, handle
);
247 void PGRecoveryMsg::run(
251 ThreadPool::TPHandle
&handle
)
253 auto latency
= time_queued
- ceph_clock_now();
254 switch (op
->get_req()->get_type()) {
255 case MSG_OSD_PG_PUSH
:
256 osd
->logger
->tinc(l_osd_recovery_push_queue_lat
, latency
);
257 case MSG_OSD_PG_PUSH_REPLY
:
258 osd
->logger
->tinc(l_osd_recovery_push_reply_queue_lat
, latency
);
259 case MSG_OSD_PG_PULL
:
260 osd
->logger
->tinc(l_osd_recovery_pull_queue_lat
, latency
);
261 case MSG_OSD_PG_BACKFILL
:
262 osd
->logger
->tinc(l_osd_recovery_backfill_queue_lat
, latency
);
263 case MSG_OSD_PG_BACKFILL_REMOVE
:
264 osd
->logger
->tinc(l_osd_recovery_backfill_remove_queue_lat
, latency
);
265 case MSG_OSD_PG_SCAN
:
266 osd
->logger
->tinc(l_osd_recovery_scan_queue_lat
, latency
);
268 osd
->dequeue_op(pg
, op
, handle
);