// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include "osd/scheduler/OpSchedulerItem.h"
#include "osd/OSD.h"
#include "osd/osd_tracer.h"


namespace ceph::osd::scheduler {

std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id) {
  out << static_cast<size_t>(class_id);
  return out;
}

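// Each of the run() methods below is executed by an OSD shard worker
// thread with the target PG already locked.  Handlers that finish their
// work inline release the lock via pg->unlock(); the peering and delete
// paths hand the locked PG to an OSD helper that unlocks it internally.
//
// PGOpItem: dispatch a queued client/replica operation to its PG.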
void PGOpItem::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  osd->dequeue_op(pg, op, handle);
  pg->unlock();
}

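// PGPeeringItem: forward a queued peering event to the OSD.  Note that
// dequeue_peering_evt() drops the PG lock itself, so there is no
// pg->unlock() here.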
void PGPeeringItem::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  osd->dequeue_peering_evt(sdata, pg.get(), evt, handle);
}

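// PGSnapTrim: run the PG's snap trimmer for the epoch at which the item
// was queued.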
void PGSnapTrim::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  pg->snap_trimmer(epoch_queued);
  pg->unlock();
}

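// The PGScrub* items below all follow the same pattern: each forwards one
// scrub state-machine event to the PG, tagged with the epoch at which the
// event was queued, and then releases the PG lock.  PGRepScrub and
// PGRepScrubResched additionally pass activation_index through to the
// replica-side scrub entry points.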
void PGScrub::run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
{
  pg->scrub(epoch_queued, handle);
  pg->unlock();
}

void PGScrubAfterRepair::run(OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->recovery_scrub(epoch_queued, handle);
  pg->unlock();
}

void PGScrubResched::run(OSD* osd,
                         OSDShard* sdata,
                         PGRef& pg,
                         ThreadPool::TPHandle& handle)
{
  pg->scrub_send_scrub_resched(epoch_queued, handle);
  pg->unlock();
}

void PGScrubResourcesOK::run(OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->scrub_send_resources_granted(epoch_queued, handle);
  pg->unlock();
}

void PGScrubDenied::run(OSD* osd,
                        OSDShard* sdata,
                        PGRef& pg,
                        ThreadPool::TPHandle& handle)
{
  pg->scrub_send_resources_denied(epoch_queued, handle);
  pg->unlock();
}

void PGScrubPushesUpdate::run(OSD* osd,
                              OSDShard* sdata,
                              PGRef& pg,
                              ThreadPool::TPHandle& handle)
{
  pg->scrub_send_pushes_update(epoch_queued, handle);
  pg->unlock();
}

void PGScrubAppliedUpdate::run(OSD* osd,
                               OSDShard* sdata,
                               PGRef& pg,
                               ThreadPool::TPHandle& handle)
{
  pg->scrub_send_applied_update(epoch_queued, handle);
  pg->unlock();
}

void PGScrubUnblocked::run(OSD* osd,
                           OSDShard* sdata,
                           PGRef& pg,
                           ThreadPool::TPHandle& handle)
{
  pg->scrub_send_unblocking(epoch_queued, handle);
  pg->unlock();
}

void PGScrubDigestUpdate::run(OSD* osd,
                              OSDShard* sdata,
                              PGRef& pg,
                              ThreadPool::TPHandle& handle)
{
  pg->scrub_send_digest_update(epoch_queued, handle);
  pg->unlock();
}

void PGScrubGotLocalMap::run(OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->scrub_send_local_map_ready(epoch_queued, handle);
  pg->unlock();
}

void PGScrubGotReplMaps::run(OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->scrub_send_replmaps_ready(epoch_queued, handle);
  pg->unlock();
}

void PGRepScrub::run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
{
  pg->replica_scrub(epoch_queued, activation_index, handle);
  pg->unlock();
}

void PGRepScrubResched::run(OSD* osd,
                            OSDShard* sdata,
                            PGRef& pg,
                            ThreadPool::TPHandle& handle)
{
  pg->replica_scrub_resched(epoch_queued, activation_index, handle);
  pg->unlock();
}

void PGScrubReplicaPushes::run([[maybe_unused]] OSD* osd,
                               OSDShard* sdata,
                               PGRef& pg,
                               ThreadPool::TPHandle& handle)
{
  pg->scrub_send_replica_pushes(epoch_queued, handle);
  pg->unlock();
}

void PGScrubScrubFinished::run([[maybe_unused]] OSD* osd,
                               OSDShard* sdata,
                               PGRef& pg,
                               ThreadPool::TPHandle& handle)
{
  pg->scrub_send_scrub_is_finished(epoch_queued, handle);
  pg->unlock();
}

void PGScrubGetNextChunk::run([[maybe_unused]] OSD* osd,
                              OSDShard* sdata,
                              PGRef& pg,
                              ThreadPool::TPHandle& handle)
{
  pg->scrub_send_get_next_chunk(epoch_queued, handle);
  pg->unlock();
}

void PGScrubChunkIsBusy::run([[maybe_unused]] OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->scrub_send_chunk_busy(epoch_queued, handle);
  pg->unlock();
}

void PGScrubChunkIsFree::run([[maybe_unused]] OSD* osd,
                             OSDShard* sdata,
                             PGRef& pg,
                             ThreadPool::TPHandle& handle)
{
  pg->scrub_send_chunk_free(epoch_queued, handle);
  pg->unlock();
}

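// PGRecovery: record how long the item waited in the queue, then run one
// round of recovery against the number of pushes reserved for this item.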
void PGRecovery::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  osd->logger->tinc(
    l_osd_recovery_queue_lat,
    ceph_clock_now() - time_queued);  // time the item spent waiting in the queue
  osd->do_recovery(pg.get(), epoch_queued, reserved_pushes, priority, handle);
  pg->unlock();
}

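// PGRecoveryContext: record the queue latency, then complete the queued
// recovery context while the PG lock is held.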
void PGRecoveryContext::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  osd->logger->tinc(
    l_osd_recovery_context_queue_lat,
    ceph_clock_now() - time_queued);  // time the item spent waiting in the queue
  c.release()->complete(handle);
  pg->unlock();
}

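// PGDelete: continue removal of a deleted PG; dequeue_delete() operates on
// the locked PG and releases the lock internally.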
void PGDelete::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  osd->dequeue_delete(sdata, pg.get(), epoch_queued, handle);
}

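// PGRecoveryMsg: a recovery-related message (push/pull/backfill/scan).
// Record the per-message-type queue-latency counter, then dispatch the
// message like a regular op.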
void PGRecoveryMsg::run(
  OSD *osd,
  OSDShard *sdata,
  PGRef& pg,
  ThreadPool::TPHandle &handle)
{
  auto latency = ceph_clock_now() - time_queued;
  switch (op->get_req()->get_type()) {
  case MSG_OSD_PG_PUSH:
    osd->logger->tinc(l_osd_recovery_push_queue_lat, latency);
    break;
  case MSG_OSD_PG_PUSH_REPLY:
    osd->logger->tinc(l_osd_recovery_push_reply_queue_lat, latency);
    break;
  case MSG_OSD_PG_PULL:
    osd->logger->tinc(l_osd_recovery_pull_queue_lat, latency);
    break;
  case MSG_OSD_PG_BACKFILL:
    osd->logger->tinc(l_osd_recovery_backfill_queue_lat, latency);
    break;
  case MSG_OSD_PG_BACKFILL_REMOVE:
    osd->logger->tinc(l_osd_recovery_backfill_remove_queue_lat, latency);
    break;
  case MSG_OSD_PG_SCAN:
    osd->logger->tinc(l_osd_recovery_scan_queue_lat, latency);
    break;
  }
  osd->dequeue_op(pg, op, handle);
  pg->unlock();
}

}