// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

/*
 * Copyright (C) 2016 Red Hat Inc.
 */


#pragma once


#include <assert.h>

#include <memory>
#include <chrono>
#include <map>
#include <vector>
#include <random>
#include <iostream>
#include <iomanip>
#include <string>
#include <functional>


namespace crimson {
  namespace qos_simulation {

    template<typename ServerId, typename ClientId, typename TS, typename TC>
    class Simulation {

    public:

      using TimePoint = std::chrono::time_point<std::chrono::steady_clock>;

    protected:

      using ClientMap = std::map<ClientId,TC*>;
      using ServerMap = std::map<ServerId,TS*>;

      uint server_count = 0;
      uint client_count = 0;

      ServerMap servers;
      ClientMap clients;
      std::vector<ServerId> server_ids;

      TimePoint early_time;
      TimePoint servers_created_time;
      TimePoint clients_created_time;
      TimePoint clients_finished_time;
      TimePoint late_time;

      std::default_random_engine prng;

      bool has_run = false;


    public:

      // format a time point for display: scale the raw tick count to
      // milliseconds (assuming a nanosecond-period steady_clock), round,
      // and keep only the low-order digits so the result prints as a
      // short seconds.milliseconds figure
      double fmt_tp(const TimePoint& t) {
        auto c = t.time_since_epoch().count();
        return uint64_t(c / 1000000.0 + 0.5) % 100000 / 1000.0;
      }

      TimePoint now() {
        return std::chrono::steady_clock::now();
      }

      using ClientBasedServerSelectFunc =
        std::function<const ServerId&(uint64_t, uint16_t)>;

      using ClientFilter = std::function<bool(const ClientId&)>;

      using ServerFilter = std::function<bool(const ServerId&)>;

      using ServerDataOutF =
        std::function<void(std::ostream& out,
                           Simulation* sim, ServerFilter,
                           int header_w, int data_w, int data_prec)>;

      using ClientDataOutF =
        std::function<void(std::ostream& out,
                           Simulation* sim, ClientFilter,
                           int header_w, int data_w, int data_prec)>;
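
      // A minimal sketch (illustration only, not used by the simulator) of
      // a callback matching the ClientDataOutF alias above; a real callback
      // would pull per-client statistics out of sim->get_client(id) and
      // honor the filter and column-width parameters that display_stats()
      // passes through:
      //
      //   auto no_op_client_out =
      //     [](std::ostream& out, Simulation* sim, ClientFilter filter,
      //        int header_w, int data_w, int data_prec) {
      //       out << std::setw(header_w) << "(no client data)" << std::endl;
      //     };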

      Simulation() :
        early_time(now()),
        prng(std::chrono::system_clock::now().time_since_epoch().count())
      {
        // empty
      }

      ~Simulation() {
        for (auto c : clients) {
          TC* cp = c.second;
          delete cp;
        }

        for (auto s : servers) {
          delete s.second;
        }
      }

      uint get_client_count() const { return client_count; }
      uint get_server_count() const { return server_count; }
      TC& get_client(ClientId id) { return *clients[id]; }
      TS& get_server(ServerId id) { return *servers[id]; }
      const ServerId& get_server_id(uint index) const {
        return server_ids[index];
      }


      void add_servers(uint count,
                       std::function<TS*(ServerId)> create_server_f) {
        uint i = server_count;

        // increment server_count before creating servers since they
        // will start running immediately and may use the server_count
        // value; NB: this could still be an issue if servers are
        // added with multiple add_servers calls; consider using a
        // separate start function after all servers (and clients?)
        // have been added
        server_count += count;

        for (; i < server_count; ++i) {
          server_ids.push_back(i);
          servers[i] = create_server_f(i);
        }

        servers_created_time = now();
      }


      void add_clients(uint count,
                       std::function<TC*(ClientId)> create_client_f) {
        uint i = client_count;

        // increment client_count before creating clients since they
        // will start running immediately and may use the client_count
        // value (e.g., in the server selection function); NB: this could
        // still be an issue if clients are added with multiple
        // add_clients calls; consider using a separate start function
        // after all clients have been added
        client_count += count;

        for (; i < client_count; ++i) {
          clients[i] = create_client_f(i);
        }

        clients_created_time = now();
      }
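
      // A hedged usage sketch of the driver flow (TestServer and TestClient
      // are hypothetical types standing in for the TS/TC template
      // arguments; server_out_f and client_out_f are user-supplied
      // callbacks; none of them are defined in this header). Servers are
      // added before clients so that the clients' server-selection
      // functions can see the final server_count, then run() blocks until
      // all clients finish:
      //
      //   Simulation<uint, uint, TestServer, TestClient> sim;
      //   sim.add_servers(4, [](uint id) { return new TestServer(id); });
      //   sim.add_clients(8, [](uint id) { return new TestClient(id); });
      //   sim.run();
      //   sim.display_stats(std::cout, server_out_f, client_out_f);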


      void run() {
        assert(server_count > 0);
        assert(client_count > 0);

        std::cout << "simulation started" << std::endl;

        // clients are now running; wait for all to finish

        for (auto const &i : clients) {
          i.second->wait_until_done();
        }

        late_time = clients_finished_time = now();

        std::cout << "simulation completed in " <<
          std::chrono::duration_cast<std::chrono::milliseconds>(clients_finished_time - servers_created_time).count() <<
          " millisecs" << std::endl;

        has_run = true;
      } // run


      void display_stats(std::ostream& out,
                         ServerDataOutF server_out_f, ClientDataOutF client_out_f,
                         ServerFilter server_filter =
                           [] (const ServerId&) { return true; },
                         ClientFilter client_filter =
                           [] (const ClientId&) { return true; },
                         int head_w = 12, int data_w = 7, int data_prec = 2) {
        assert(has_run);

        // amount of initial data to skip (currently none)
        const std::chrono::seconds skip_amount(0);
        // bucket size over which operations are counted
        const std::chrono::seconds measure_unit(2);
        // unit in which rates are reported
        const std::chrono::seconds report_unit(1);

        // compute and display stats

        TimePoint earliest_start = late_time;
        TimePoint latest_start = early_time;
        TimePoint earliest_finish = late_time;
        TimePoint latest_finish = early_time;

        for (auto const &c : clients) {
          auto start = c.second->get_op_times().front();
          auto end = c.second->get_op_times().back();

          if (start < earliest_start) { earliest_start = start; }
          if (start > latest_start) { latest_start = start; }
          if (end < earliest_finish) { earliest_finish = end; }
          if (end > latest_finish) { latest_finish = end; }
        }

        double ops_factor =
          std::chrono::duration_cast<std::chrono::duration<double>>(measure_unit) /
          std::chrono::duration_cast<std::chrono::duration<double>>(report_unit);
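
        // For example, with measure_unit = 2 s and report_unit = 1 s as set
        // above, ops_factor is 2.0, so each bucket's raw op count is divided
        // by 2 below to yield an ops-per-second figure.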

        const auto start_edge = clients_created_time + skip_amount;

        std::map<ClientId,std::vector<double>> ops_data;

        for (auto const &c : clients) {
          auto it = c.second->get_op_times().begin();
          const auto end = c.second->get_op_times().end();
          while (it != end && *it < start_edge) { ++it; }

          for (auto time_edge = start_edge + measure_unit;
               time_edge <= latest_finish + measure_unit;
               time_edge += measure_unit) {
            int count = 0;
            for (; it != end && *it < time_edge; ++count, ++it) { /* empty */ }
            double ops_per_second = double(count) / ops_factor;
            ops_data[c.first].push_back(ops_per_second);
          }
        }

        out << "==== Client Data ====" << std::endl;

        out << std::setw(head_w) << "client:";
        for (auto const &c : clients) {
          if (!client_filter(c.first)) continue;
          out << " " << std::setw(data_w) << c.first;
        }
        out << " " << std::setw(data_w) << "total" << std::endl;

        {
          bool has_data;
          size_t i = 0;
          do {
            std::string line_header = "t_" + std::to_string(i) + ":";
            out << std::setw(head_w) << line_header;
            has_data = false;
            double total = 0.0;
            for (auto const &c : clients) {
              double data = 0.0;
              if (i < ops_data[c.first].size()) {
                data = ops_data[c.first][i];
                has_data = true;
              }
              total += data;

              if (!client_filter(c.first)) continue;

              out << " " << std::setw(data_w) << std::setprecision(data_prec) <<
                std::fixed << data;
            }
            out << " " << std::setw(data_w) << std::setprecision(data_prec) <<
              std::fixed << total << std::endl;
            ++i;
          } while (has_data);
        }
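
        // The block above emits one "t_<i>:" row per measure_unit bucket,
        // with one column per client that passes client_filter plus a
        // trailing total over all clients. Illustrative shape only (the
        // numbers below are made up):
        //
        //        client:       0       1   total
        //           t_0:   50.00   48.50   98.50
        //           t_1:   49.50   51.00  100.50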

        client_out_f(out, this, client_filter, head_w, data_w, data_prec);

        display_client_internal_stats<std::chrono::nanoseconds>(out,
                                                                "nanoseconds");

        out << std::endl << "==== Server Data ====" << std::endl;

        out << std::setw(head_w) << "server:";
        for (auto const &s : servers) {
          if (!server_filter(s.first)) continue;
          out << " " << std::setw(data_w) << s.first;
        }
        out << " " << std::setw(data_w) << "total" << std::endl;

        server_out_f(out, this, server_filter, head_w, data_w, data_prec);

        display_server_internal_stats<std::chrono::nanoseconds>(out,
                                                                "nanoseconds");

        // clean up clients then servers

        for (auto i = clients.begin(); i != clients.end(); ++i) {
          delete i->second;
          i->second = nullptr;
        }

        for (auto i = servers.begin(); i != servers.end(); ++i) {
          delete i->second;
          i->second = nullptr;
        }
      } // display_stats


      template<typename T>
      void display_server_internal_stats(std::ostream& out,
                                         std::string time_unit) {
        T add_request_time(0);
        T request_complete_time(0);
        uint32_t add_request_count = 0;
        uint32_t request_complete_count = 0;

        for (uint i = 0; i < get_server_count(); ++i) {
          const auto& server = get_server(i);
          const auto& is = server.get_internal_stats();
          add_request_time +=
            std::chrono::duration_cast<T>(is.add_request_time);
          request_complete_time +=
            std::chrono::duration_cast<T>(is.request_complete_time);
          add_request_count += is.add_request_count;
          request_complete_count += is.request_complete_count;
        }

        double add_request_time_per_unit =
          double(add_request_time.count()) / add_request_count;
        out << "total time to add requests: " <<
          std::fixed << add_request_time.count() << " " << time_unit <<
          ";" << std::endl <<
          " count: " << add_request_count << ";" << std::endl <<
          " average: " << add_request_time_per_unit <<
          " " << time_unit << " per request/response" << std::endl;

        double request_complete_time_unit =
          double(request_complete_time.count()) / request_complete_count;
        out << "total time to note requests complete: " << std::fixed <<
          request_complete_time.count() << " " << time_unit << ";" <<
          std::endl <<
          " count: " << request_complete_count << ";" << std::endl <<
          " average: " << request_complete_time_unit <<
          " " << time_unit << " per request/response" << std::endl;

        out << std::endl;

        assert(add_request_count == request_complete_count);
        out << "server timing for QOS algorithm: " <<
          add_request_time_per_unit + request_complete_time_unit <<
          " " << time_unit << " per request/response" << std::endl;
      }


      template<typename T>
      void display_client_internal_stats(std::ostream& out,
                                         std::string time_unit) {
        T track_resp_time(0);
        T get_req_params_time(0);
        uint32_t track_resp_count = 0;
        uint32_t get_req_params_count = 0;

        for (uint i = 0; i < get_client_count(); ++i) {
          const auto& client = get_client(i);
          const auto& is = client.get_internal_stats();
          track_resp_time +=
            std::chrono::duration_cast<T>(is.track_resp_time);
          get_req_params_time +=
            std::chrono::duration_cast<T>(is.get_req_params_time);
          track_resp_count += is.track_resp_count;
          get_req_params_count += is.get_req_params_count;
        }

        double track_resp_time_unit =
          double(track_resp_time.count()) / track_resp_count;
        out << "total time to track responses: " <<
          std::fixed << track_resp_time.count() << " " << time_unit << ";" <<
          std::endl <<
          " count: " << track_resp_count << ";" << std::endl <<
          " average: " << track_resp_time_unit << " " << time_unit <<
          " per request/response" << std::endl;

        double get_req_params_time_unit =
          double(get_req_params_time.count()) / get_req_params_count;
        out << "total time to get request parameters: " <<
          std::fixed << get_req_params_time.count() << " " << time_unit <<
          ";" << std::endl <<
          " count: " << get_req_params_count << ";" << std::endl <<
          " average: " << get_req_params_time_unit << " " << time_unit <<
          " per request/response" << std::endl;

        out << std::endl;

        assert(track_resp_count == get_req_params_count);
        out << "client timing for QOS algorithm: " <<
          track_resp_time_unit + get_req_params_time_unit << " " <<
          time_unit << " per request/response" << std::endl;
      }


      // **** server selection functions ****


      const ServerId& server_select_alternate(uint64_t seed,
                                              uint16_t client_idx) {
        uint index = (client_idx + seed) % server_count;
        return server_ids[index];
      }


      // returns a lambda that alternates each client among servers_per
      // consecutive servers, starting from a base index proportional to
      // the client's position among all clients
      ClientBasedServerSelectFunc
      make_server_select_alt_range(uint16_t servers_per) {
        return [servers_per,this](uint64_t seed, uint16_t client_idx)
          -> const ServerId& {
          double factor = double(server_count) / client_count;
          uint offset = seed % servers_per;
          uint index = (uint(0.5 + client_idx * factor) + offset) % server_count;
          return server_ids[index];
        };
      }
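
      // Worked example (illustrative numbers only): with server_count = 4,
      // client_count = 8, and servers_per = 2, factor is 0.5, so client_idx 3
      // has base index uint(0.5 + 3 * 0.5) = 2 and alternates between servers
      // 2 and 3 as seed % 2 cycles between 0 and 1.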


      // function to choose a server randomly
      const ServerId& server_select_random(uint64_t seed, uint16_t client_idx) {
        uint index = prng() % server_count;
        return server_ids[index];
      }


      // returns a lambda that, for each client, chooses randomly among
      // servers_per servers starting from a base index proportional to
      // the client's position among all clients
      ClientBasedServerSelectFunc
      make_server_select_ran_range(uint16_t servers_per) {
        return [servers_per,this](uint64_t seed, uint16_t client_idx)
          -> const ServerId& {
          double factor = double(server_count) / client_count;
          uint offset = prng() % servers_per;
          uint index = (uint(0.5 + client_idx * factor) + offset) % server_count;
          return server_ids[index];
        };
      }


      // function to always choose the first server
      const ServerId& server_select_0(uint64_t seed, uint16_t client_idx) {
        return server_ids[0];
      }
    }; // class Simulation

  } // namespace qos_simulation
} // namespace crimson