// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

/*
 * Copyright (C) 2016 Red Hat Inc.
 *
 * Author: J. Eric Ivancich <ivancich@redhat.com>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License version
 * 2.1, as published by the Free Software Foundation. See file
 * COPYING.
 */


#pragma once

#include <assert.h>

#include <memory>
#include <chrono>
#include <map>
#include <random>
#include <iostream>
#include <iomanip>
#include <string>
#include <vector>      // for std::vector (used below)
#include <functional>  // for std::function (used below)
#include <cstdint>     // for uint16_t / uint32_t / uint64_t (used below)


namespace crimson {
  namespace qos_simulation {

    template<typename ServerId, typename ClientId, typename TS, typename TC>
    class Simulation {

    public:

      using TimePoint = std::chrono::time_point<std::chrono::steady_clock>;

    protected:

      using ClientMap = std::map<ClientId,TC*>;
      using ServerMap = std::map<ServerId,TS*>;

      unsigned server_count = 0;
      unsigned client_count = 0;

      ServerMap servers;
      ClientMap clients;
      std::vector<ServerId> server_ids;

      TimePoint early_time;
      TimePoint servers_created_time;
      TimePoint clients_created_time;
      TimePoint clients_finished_time;
      TimePoint late_time;

      std::default_random_engine prng;

      bool has_run = false;


    public:

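      // Format a time point for compact display: assuming a nanosecond
      // steady_clock tick, this yields seconds (to millisecond precision)
      // wrapped modulo 100 seconds.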
      double fmt_tp(const TimePoint& t) {
        auto c = t.time_since_epoch().count();
        return uint64_t(c / 1000000.0 + 0.5) % 100000 / 1000.0;
      }

      TimePoint now() {
        return std::chrono::steady_clock::now();
      }

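      // maps a per-request seed and a client index to the server that
      // should receive the request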
      using ClientBasedServerSelectFunc =
        std::function<const ServerId&(uint64_t, uint16_t)>;

      using ClientFilter = std::function<bool(const ClientId&)>;

      using ServerFilter = std::function<bool(const ServerId&)>;

      using ServerDataOutF =
        std::function<void(std::ostream& out,
                           Simulation* sim, ServerFilter,
                           int header_w, int data_w, int data_prec)>;

      using ClientDataOutF =
        std::function<void(std::ostream& out,
                           Simulation* sim, ClientFilter,
                           int header_w, int data_w, int data_prec)>;

      Simulation() :
        early_time(now()),
        prng(std::chrono::system_clock::now().time_since_epoch().count())
      {
        // empty
      }

      ~Simulation() {
        for (auto c : clients) {
          TC* cp = c.second;
          delete cp;
        }

        for (auto s : servers) {
          delete s.second;
        }
      }

      unsigned get_client_count() const { return client_count; }
      unsigned get_server_count() const { return server_count; }
      TC& get_client(ClientId id) { return *clients[id]; }
      TS& get_server(ServerId id) { return *servers[id]; }
      const ServerId& get_server_id(std::size_t index) const {
        return server_ids[index];
      }


      void add_servers(unsigned count,
                       std::function<TS*(ServerId)> create_server_f) {
        unsigned i = server_count;

        // increment server_count before creating servers since they
        // will start running immediately and may use the server_count
        // value; NB: this could still be an issue if servers are
        // added with multiple add_servers calls; consider using a
        // separate start function after all servers (and clients?)
        // have been added
        server_count += count;

        for (; i < server_count; ++i) {
          server_ids.push_back(i);
          servers[i] = create_server_f(i);
        }

        servers_created_time = now();
      }


      void add_clients(unsigned count,
                       std::function<TC*(ClientId)> create_client_f) {
        unsigned i = client_count;

        // increment client_count before creating clients since they
        // will start running immediately and may use the client_count
        // value (e.g., in the server selection function); NB: this could
        // still be an issue if clients are added with multiple
        // add_clients calls; consider using a separate start function
        // after all clients have been added
        client_count += count;

        for (; i < client_count; ++i) {
          clients[i] = create_client_f(i);
        }

        clients_created_time = now();
      }


      void run() {
        assert(server_count > 0);
        assert(client_count > 0);

        std::cout << "simulation started" << std::endl;

        // clients are now running; wait for all to finish

        for (auto const &i : clients) {
          i.second->wait_until_done();
        }

        late_time = clients_finished_time = now();

        std::cout << "simulation completed in " <<
          std::chrono::duration_cast<std::chrono::milliseconds>(clients_finished_time - servers_created_time).count() <<
          " millisecs" << std::endl;

        has_run = true;
      } // run

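      // Bucket each client's recorded op times into measure_unit windows
      // (starting skip_amount after client creation), print per-client rates
      // normalized to report_unit, run the caller-supplied client and server
      // output functions, then free all clients and servers.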
      void display_stats(std::ostream& out,
                         ServerDataOutF server_out_f, ClientDataOutF client_out_f,
                         ServerFilter server_filter =
                           [] (const ServerId&) { return true; },
                         ClientFilter client_filter =
                           [] (const ClientId&) { return true; },
                         int head_w = 12, int data_w = 7, int data_prec = 2) {
        assert(has_run);

        // amount of initial data to skip (none at present)
        const std::chrono::seconds skip_amount(0);
        // width of the window over which operations are counted
        const std::chrono::seconds measure_unit(2);
        // time unit in which rates are reported
        const std::chrono::seconds report_unit(1);

        // compute and display stats

        TimePoint earliest_start = late_time;
        TimePoint latest_start = early_time;
        TimePoint earliest_finish = late_time;
        TimePoint latest_finish = early_time;

        for (auto const &c : clients) {
          auto start = c.second->get_op_times().front();
          auto end = c.second->get_op_times().back();

          if (start < earliest_start) { earliest_start = start; }
          if (start > latest_start) { latest_start = start; }
          if (end < earliest_finish) { earliest_finish = end; }
          if (end > latest_finish) { latest_finish = end; }
        }

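        // ops_factor converts a count per measure_unit window into a rate
        // per report_unit (e.g., a 2s window reported per 1s halves the count)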
        double ops_factor =
          std::chrono::duration_cast<std::chrono::duration<double>>(measure_unit) /
          std::chrono::duration_cast<std::chrono::duration<double>>(report_unit);

        const auto start_edge = clients_created_time + skip_amount;

        std::map<ClientId,std::vector<double>> ops_data;

        for (auto const &c : clients) {
          auto it = c.second->get_op_times().begin();
          const auto end = c.second->get_op_times().end();
          while (it != end && *it < start_edge) { ++it; }

          for (auto time_edge = start_edge + measure_unit;
               time_edge <= latest_finish + measure_unit;
               time_edge += measure_unit) {
            int count = 0;
            for (; it != end && *it < time_edge; ++count, ++it) { /* empty */ }
            double ops_per_second = double(count) / ops_factor;
            ops_data[c.first].push_back(ops_per_second);
          }
        }

        out << "==== Client Data ====" << std::endl;

        out << std::setw(head_w) << "client:";
        for (auto const &c : clients) {
          if (!client_filter(c.first)) continue;
          out << " " << std::setw(data_w) << c.first;
        }
        out << std::setw(data_w) << "total" << std::endl;

        {
          bool has_data;
          size_t i = 0;
          do {
            std::string line_header = "t_" + std::to_string(i) + ":";
            out << std::setw(head_w) << line_header;
            has_data = false;
            double total = 0.0;
            for (auto const &c : clients) {
              double data = 0.0;
              if (i < ops_data[c.first].size()) {
                data = ops_data[c.first][i];
                has_data = true;
              }
              total += data;

              if (!client_filter(c.first)) continue;

              out << " " << std::setw(data_w) << std::setprecision(data_prec) <<
                std::fixed << data;
            }
            out << " " << std::setw(data_w) << std::setprecision(data_prec) <<
              std::fixed << total << std::endl;
            ++i;
          } while(has_data);
        }

        client_out_f(out, this, client_filter, head_w, data_w, data_prec);

        display_client_internal_stats<std::chrono::nanoseconds>(out,
                                                                "nanoseconds");

        out << std::endl << "==== Server Data ====" << std::endl;

        out << std::setw(head_w) << "server:";
        for (auto const &s : servers) {
          if (!server_filter(s.first)) continue;
          out << " " << std::setw(data_w) << s.first;
        }
        out << " " << std::setw(data_w) << "total" << std::endl;

        server_out_f(out, this, server_filter, head_w, data_w, data_prec);

        display_server_internal_stats<std::chrono::nanoseconds>(out,
                                                                "nanoseconds");

        // clean up clients then servers

        for (auto i = clients.begin(); i != clients.end(); ++i) {
          delete i->second;
          i->second = nullptr;
        }

        for (auto i = servers.begin(); i != servers.end(); ++i) {
          delete i->second;
          i->second = nullptr;
        }
      } // display_stats

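      // Sum the time servers spent in add_request and request_complete
      // (as reported by each server's get_internal_stats()) and print the
      // totals, counts, and per-request averages in the given time_unit.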
      template<typename T>
      void display_server_internal_stats(std::ostream& out,
                                         const std::string& time_unit) {
        T add_request_time(0);
        T request_complete_time(0);
        uint32_t add_request_count = 0;
        uint32_t request_complete_count = 0;

        for (unsigned i = 0; i < get_server_count(); ++i) {
          const auto& server = get_server(i);
          const auto& is = server.get_internal_stats();
          add_request_time +=
            std::chrono::duration_cast<T>(is.add_request_time);
          request_complete_time +=
            std::chrono::duration_cast<T>(is.request_complete_time);
          add_request_count += is.add_request_count;
          request_complete_count += is.request_complete_count;
        }

        double add_request_time_per_unit =
          double(add_request_time.count()) / add_request_count;
        out << "total time to add requests: " <<
          std::fixed << add_request_time.count() << " " << time_unit <<
          ";" << std::endl <<
          " count: " << add_request_count << ";" << std::endl <<
          " average: " << add_request_time_per_unit <<
          " " << time_unit << " per request/response" << std::endl;

        double request_complete_time_unit =
          double(request_complete_time.count()) / request_complete_count;
        out << "total time to note requests complete: " << std::fixed <<
          request_complete_time.count() << " " << time_unit << ";" <<
          std::endl <<
          " count: " << request_complete_count << ";" << std::endl <<
          " average: " << request_complete_time_unit <<
          " " << time_unit << " per request/response" << std::endl;

        out << std::endl;

        assert(add_request_count == request_complete_count);
        out << "server timing for QOS algorithm: " <<
          add_request_time_per_unit + request_complete_time_unit <<
          " " << time_unit << " per request/response" << std::endl;
      }

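      // Sum the time clients spent tracking responses and obtaining request
      // parameters (from each client's get_internal_stats()) and print the
      // totals, counts, and per-request averages in the given time_unit.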
      template<typename T>
      void display_client_internal_stats(std::ostream& out,
                                         const std::string& time_unit) {
        T track_resp_time(0);
        T get_req_params_time(0);
        uint32_t track_resp_count = 0;
        uint32_t get_req_params_count = 0;

        for (unsigned i = 0; i < get_client_count(); ++i) {
          const auto& client = get_client(i);
          const auto& is = client.get_internal_stats();
          track_resp_time +=
            std::chrono::duration_cast<T>(is.track_resp_time);
          get_req_params_time +=
            std::chrono::duration_cast<T>(is.get_req_params_time);
          track_resp_count += is.track_resp_count;
          get_req_params_count += is.get_req_params_count;
        }

        double track_resp_time_unit =
          double(track_resp_time.count()) / track_resp_count;
        out << "total time to track responses: " <<
          std::fixed << track_resp_time.count() << " " << time_unit << ";" <<
          std::endl <<
          " count: " << track_resp_count << ";" << std::endl <<
          " average: " << track_resp_time_unit << " " << time_unit <<
          " per request/response" << std::endl;

        double get_req_params_time_unit =
          double(get_req_params_time.count()) / get_req_params_count;
        out << "total time to get request parameters: " <<
          std::fixed << get_req_params_time.count() << " " << time_unit <<
          ";" << std::endl <<
          " count: " << get_req_params_count << ";" << std::endl <<
          " average: " << get_req_params_time_unit << " " << time_unit <<
          " per request/response" << std::endl;

        out << std::endl;

        assert(track_resp_count == get_req_params_count);
        out << "client timing for QOS algorithm: " <<
          track_resp_time_unit + get_req_params_time_unit << " " <<
          time_unit << " per request/response" << std::endl;
      }


      // **** server selection functions ****

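      // deterministically cycle through the servers based on the request
      // seed and the client's index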
      const ServerId& server_select_alternate(uint64_t seed,
                                              uint16_t client_idx) {
        size_t index = (client_idx + seed) % server_count;
        return server_ids[index];
      }

      // returns a lambda that, for each client, alternates among a band of
      // servers_per consecutive servers, chosen by the request seed
      ClientBasedServerSelectFunc
      make_server_select_alt_range(uint16_t servers_per) {
        return [servers_per,this](uint64_t seed, uint16_t client_idx)
          -> const ServerId& {
          double factor = double(server_count) / client_count;
          size_t offset = seed % servers_per;
          size_t index = (size_t(0.5 + client_idx * factor) + offset) % server_count;
          return server_ids[index];
        };
      }

      // function to choose a server randomly
      const ServerId& server_select_random(uint64_t seed, uint16_t client_idx) {
        size_t index = prng() % server_count;
        return server_ids[index];
      }

      // returns a lambda that, for each client, chooses randomly among a
      // band of servers_per consecutive servers
      ClientBasedServerSelectFunc
      make_server_select_ran_range(uint16_t servers_per) {
        return [servers_per,this](uint64_t seed, uint16_t client_idx)
          -> const ServerId& {
          double factor = double(server_count) / client_count;
          size_t offset = prng() % servers_per;
          size_t index = (size_t(0.5 + client_idx * factor) + offset) % server_count;
          return server_ids[index];
        };
      }


      // function to always choose the first server
      const ServerId& server_select_0(uint64_t seed, uint16_t client_idx) {
        return server_ids[0];
      }
    }; // class Simulation
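
    // Illustrative sketch of how a driver might use Simulation; TestServer
    // and TestClient are hypothetical types (not defined in this header)
    // that would need to provide the interfaces used above, e.g.
    // wait_until_done(), get_op_times(), and get_internal_stats():
    //
    //   using MySim = Simulation<unsigned, unsigned, TestServer, TestClient>;
    //   MySim sim;
    //   sim.add_servers(4, [](unsigned id) { return new TestServer(id); });
    //   sim.add_clients(8, [](unsigned id) { return new TestClient(id); });
    //   sim.run();
    //   sim.display_stats(std::cout, my_server_out_f, my_client_out_f);
    //
    // where my_server_out_f and my_client_out_f are placeholders for
    // functions matching ServerDataOutF and ClientDataOutF.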

  }; // namespace qos_simulation
}; // namespace crimson