// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */


#include <memory>

#include "osd/mClockClientQueue.h"
#include "common/dout.h"


namespace dmc = crimson::dmclock;


#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout


namespace ceph {

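  // Pull the per-class mclock tags (reservation, weight, and limit)
  // for each op class from the OSD configuration.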
  mClockClientQueue::mclock_op_tags_t::mclock_op_tags_t(CephContext *cct) :
    client_op(cct->_conf->osd_op_queue_mclock_client_op_res,
              cct->_conf->osd_op_queue_mclock_client_op_wgt,
              cct->_conf->osd_op_queue_mclock_client_op_lim),
    osd_subop(cct->_conf->osd_op_queue_mclock_osd_subop_res,
              cct->_conf->osd_op_queue_mclock_osd_subop_wgt,
              cct->_conf->osd_op_queue_mclock_osd_subop_lim),
    snaptrim(cct->_conf->osd_op_queue_mclock_snap_res,
             cct->_conf->osd_op_queue_mclock_snap_wgt,
             cct->_conf->osd_op_queue_mclock_snap_lim),
    recov(cct->_conf->osd_op_queue_mclock_recov_res,
          cct->_conf->osd_op_queue_mclock_recov_wgt,
          cct->_conf->osd_op_queue_mclock_recov_lim),
    scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
          cct->_conf->osd_op_queue_mclock_scrub_wgt,
          cct->_conf->osd_op_queue_mclock_scrub_lim)
  {
    dout(20) <<
      "mClockClientQueue settings:: " <<
      "client_op:" << client_op <<
      "; osd_subop:" << osd_subop <<
      "; snaptrim:" << snaptrim <<
      "; recov:" << recov <<
      "; scrub:" << scrub <<
      dendl;
  }

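  // Map an op class to the dmclock ClientInfo (reservation, weight,
  // and limit) configured for that class.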
  dmc::ClientInfo
  mClockClientQueue::op_class_client_info_f(
    const mClockClientQueue::InnerClient& client)
  {
    switch(client.second) {
    case osd_op_type_t::client_op:
      return mclock_op_tags->client_op;
    case osd_op_type_t::osd_subop:
      return mclock_op_tags->osd_subop;
    case osd_op_type_t::bg_snaptrim:
      return mclock_op_tags->snaptrim;
    case osd_op_type_t::bg_recovery:
      return mclock_op_tags->recov;
    case osd_op_type_t::bg_scrub:
      return mclock_op_tags->scrub;
    default:
      assert(0);
      return dmc::ClientInfo(-1, -1, -1);
    }
  }


  /*
   * class mClockClientQueue
   */

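  // the op tags are shared by every instance of this queue type; they
  // are created once, on first construction (see the constructor below)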
  std::unique_ptr<mClockClientQueue::mclock_op_tags_t>
  mClockClientQueue::mclock_op_tags(nullptr);

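  // visitor applied to a request's variant to obtain its op type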
  mClockClientQueue::pg_queueable_visitor_t
  mClockClientQueue::pg_queueable_visitor;

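  // construct the underlying dmclock queue around the op-class info
  // function above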
  mClockClientQueue::mClockClientQueue(CephContext *cct) :
    queue(&mClockClientQueue::op_class_client_info_f)
  {
    // manage the singleton; create the shared op tags on first construction
    if (!mclock_op_tags) {
      mclock_op_tags.reset(new mclock_op_tags_t(cct));
    }
  }

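  // Determine the op class of a request by visiting its variant.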
  mClockClientQueue::osd_op_type_t
  mClockClientQueue::get_osd_op_type(const Request& request) {
    osd_op_type_t type =
      boost::apply_visitor(pg_queueable_visitor, request.second.get_variant());

    // if we got client_op back then we need to distinguish between
    // a client op and an osd subop.

    if (osd_op_type_t::client_op != type) {
      return type;
    } else if (MSG_OSD_SUBOP ==
               boost::get<OpRequestRef>(
                 request.second.get_variant())->get_req()->get_header().type) {
      return osd_op_type_t::osd_subop;
    } else {
      return osd_op_type_t::client_op;
    }
  }

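  // Pair the external client with the op type, so dmclock tracks each
  // (client, op class) combination separately.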
  inline mClockClientQueue::InnerClient
  mClockClientQueue::get_inner_client(const Client& cl,
                                      const Request& request) {
    return InnerClient(cl, get_osd_op_type(request));
  }

  // Formatted output of the queue
  inline void mClockClientQueue::dump(ceph::Formatter *f) const {
    queue.dump(f);
  }

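  // Enqueue op in the back of the strict queue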
  inline void mClockClientQueue::enqueue_strict(Client cl,
                                                unsigned priority,
                                                Request item) {
    queue.enqueue_strict(get_inner_client(cl, item), priority, item);
  }

  // Enqueue op in the front of the strict queue
  inline void mClockClientQueue::enqueue_strict_front(Client cl,
                                                      unsigned priority,
                                                      Request item) {
    queue.enqueue_strict_front(get_inner_client(cl, item), priority, item);
  }

  // Enqueue op in the back of the regular queue
  inline void mClockClientQueue::enqueue(Client cl,
                                         unsigned priority,
                                         unsigned cost,
                                         Request item) {
    queue.enqueue(get_inner_client(cl, item), priority, cost, item);
  }

  // Enqueue the op in the front of the regular queue
  inline void mClockClientQueue::enqueue_front(Client cl,
                                               unsigned priority,
                                               unsigned cost,
                                               Request item) {
    queue.enqueue_front(get_inner_client(cl, item), priority, cost, item);
  }

  // Return an op to be dispatched
  inline Request mClockClientQueue::dequeue() {
    return queue.dequeue();
  }
} // namespace ceph