]> git.proxmox.com Git - ceph.git/blob - ceph/src/osd/mClockOpClassQueue.cc
update sources to v12.1.1
[ceph.git] / ceph / src / osd / mClockOpClassQueue.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2016 Red Hat Inc.
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15
16 #include <memory>
17
18 #include "osd/mClockOpClassQueue.h"
19 #include "common/dout.h"
20
21
22 namespace dmc = crimson::dmclock;
23
24
25 #define dout_context cct
26 #define dout_subsys ceph_subsys_osd
27 #undef dout_prefix
28 #define dout_prefix *_dout
29
30
31 namespace ceph {
32
// Load the mClock tag parameters for every operation class from the OSD
// configuration.  Each class gets a (reservation, weight, limit) triple,
// read from the corresponding osd_op_queue_mclock_*_{res,wgt,lim}
// config options: client ops, OSD sub-ops (replication), snap trimming,
// recovery, and scrub.
mClockOpClassQueue::mclock_op_tags_t::mclock_op_tags_t(CephContext *cct) :
  client_op(cct->_conf->osd_op_queue_mclock_client_op_res,
	    cct->_conf->osd_op_queue_mclock_client_op_wgt,
	    cct->_conf->osd_op_queue_mclock_client_op_lim),
  osd_subop(cct->_conf->osd_op_queue_mclock_osd_subop_res,
	    cct->_conf->osd_op_queue_mclock_osd_subop_wgt,
	    cct->_conf->osd_op_queue_mclock_osd_subop_lim),
  snaptrim(cct->_conf->osd_op_queue_mclock_snap_res,
	   cct->_conf->osd_op_queue_mclock_snap_wgt,
	   cct->_conf->osd_op_queue_mclock_snap_lim),
  recov(cct->_conf->osd_op_queue_mclock_recov_res,
	cct->_conf->osd_op_queue_mclock_recov_wgt,
	cct->_conf->osd_op_queue_mclock_recov_lim),
  scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
	cct->_conf->osd_op_queue_mclock_scrub_wgt,
	cct->_conf->osd_op_queue_mclock_scrub_lim)
{
  // Log the effective settings at debug level 20 so a misconfigured
  // queue can be diagnosed from the OSD log.
  dout(20) <<
    "mClockOpClassQueue settings:: " <<
    "client_op:" << client_op <<
    "; osd_subop:" << osd_subop <<
    "; snaptrim:" << snaptrim <<
    "; recov:" << recov <<
    "; scrub:" << scrub <<
    dendl;
}
59
60
61 dmc::ClientInfo
62 mClockOpClassQueue::op_class_client_info_f(const osd_op_type_t& op_type) {
63 switch(op_type) {
64 case osd_op_type_t::client_op:
65 return mclock_op_tags->client_op;
66 case osd_op_type_t::osd_subop:
67 return mclock_op_tags->osd_subop;
68 case osd_op_type_t::bg_snaptrim:
69 return mclock_op_tags->snaptrim;
70 case osd_op_type_t::bg_recovery:
71 return mclock_op_tags->recov;
72 case osd_op_type_t::bg_scrub:
73 return mclock_op_tags->scrub;
74 default:
75 assert(0);
76 return dmc::ClientInfo(-1, -1, -1);
77 }
78 }
79
/*
 * class mClockOpClassQueue
 */

// Definition of the static singleton holding the per-class mClock tags;
// it is created lazily by the first mClockOpClassQueue constructed
// (see the constructor below).
std::unique_ptr<mClockOpClassQueue::mclock_op_tags_t>
mClockOpClassQueue::mclock_op_tags(nullptr);

// Shared visitor instance used by get_osd_op_type() to classify queued
// requests; presumably stateless, so a single static copy suffices —
// confirm against pg_queueable_visitor_t's declaration in the header.
mClockOpClassQueue::pg_queueable_visitor_t
mClockOpClassQueue::pg_queueable_visitor;
89
90 mClockOpClassQueue::mClockOpClassQueue(CephContext *cct) :
91 queue(&mClockOpClassQueue::op_class_client_info_f)
92 {
93 // manage the singleton
94 if (!mclock_op_tags) {
95 mclock_op_tags.reset(new mclock_op_tags_t(cct));
96 }
97 }
98
// Classify a queued request into an mClock operation class.  The visitor
// resolves the broad categories (client op, snap trim, recovery, scrub);
// a client_op result is then refined by the message header type, since
// MSG_OSD_SUBOP marks inter-OSD replication traffic rather than a true
// client operation.
mClockOpClassQueue::osd_op_type_t
mClockOpClassQueue::get_osd_op_type(const Request& request) {
  osd_op_type_t type =
    boost::apply_visitor(pg_queueable_visitor, request.second.get_variant());

  // if we got client_op back then we need to distinguish between
  // a client op and an osd subop.

  if (osd_op_type_t::client_op != type) {
    return type;
  } else if (MSG_OSD_SUBOP ==
	     // NOTE(review): boost::get<OpRequestRef> throws bad_get if
	     // the variant holds another alternative — presumably the
	     // visitor returns client_op only for the OpRequestRef case;
	     // verify against pg_queueable_visitor_t in the header.
	     boost::get<OpRequestRef>(
	       request.second.get_variant())->get_req()->get_header().type) {
    return osd_op_type_t::osd_subop;
  } else {
    return osd_op_type_t::client_op;
  }
}
117
// Formatted output of the queue: delegate entirely to the underlying
// dmClock queue's dump implementation.
void mClockOpClassQueue::dump(ceph::Formatter *f) const {
  queue.dump(f);
}
122
123 } // namespace ceph