// Vendored copy of google-benchmark src/complexity.cc
// (ceph/src/jaegertracing/opentelemetry-cpp/third_party/benchmark/src/complexity.cc)
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#include "benchmark/benchmark.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>

// NOTE(review): CHECK_EQ/CHECK_GE/CHECK_NE are used below; upstream benchmark
// declares them in "check.h" — confirm the header path in this vendored tree.
// Also confirm the `namespace benchmark {` opener, lost in extraction.
#include "check.h"
#include "complexity.h"
27 // Internal function to calculate the different scalability forms
28 BigOFunc
* FittingCurve(BigO complexity
) {
29 static const double kLog2E
= 1.44269504088896340736;
32 return [](IterationCount n
) -> double { return static_cast<double>(n
); };
34 return [](IterationCount n
) -> double { return std::pow(n
, 2); };
36 return [](IterationCount n
) -> double { return std::pow(n
, 3); };
38 /* Note: can't use log2 because Android's GNU STL lacks it */
40 [](IterationCount n
) { return kLog2E
* log(static_cast<double>(n
)); };
42 /* Note: can't use log2 because Android's GNU STL lacks it */
43 return [](IterationCount n
) {
44 return kLog2E
* n
* log(static_cast<double>(n
));
48 return [](IterationCount
) { return 1.0; };
52 // Function to return an string for the calculated complexity
53 std::string
GetBigOString(BigO complexity
) {
72 // Find the coefficient for the high-order term in the running time, by
73 // minimizing the sum of squares of relative error, for the fitting curve
74 // given by the lambda expression.
75 // - n : Vector containing the size of the benchmark tests.
76 // - time : Vector containing the times for the benchmark tests.
77 // - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
79 // For a deeper explanation on the algorithm logic, please refer to
80 // https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
82 LeastSq
MinimalLeastSq(const std::vector
<int64_t>& n
,
83 const std::vector
<double>& time
,
84 BigOFunc
* fitting_curve
) {
85 double sigma_gn
= 0.0;
86 double sigma_gn_squared
= 0.0;
87 double sigma_time
= 0.0;
88 double sigma_time_gn
= 0.0;
90 // Calculate least square fitting parameter
91 for (size_t i
= 0; i
< n
.size(); ++i
) {
92 double gn_i
= fitting_curve(n
[i
]);
94 sigma_gn_squared
+= gn_i
* gn_i
;
95 sigma_time
+= time
[i
];
96 sigma_time_gn
+= time
[i
] * gn_i
;
100 result
.complexity
= oLambda
;
102 // Calculate complexity.
103 result
.coef
= sigma_time_gn
/ sigma_gn_squared
;
107 for (size_t i
= 0; i
< n
.size(); ++i
) {
108 double fit
= result
.coef
* fitting_curve(n
[i
]);
109 rms
+= pow((time
[i
] - fit
), 2);
112 // Normalized RMS by the mean of the observed values
113 double mean
= sigma_time
/ n
.size();
114 result
.rms
= sqrt(rms
/ n
.size()) / mean
;
119 // Find the coefficient for the high-order term in the running time, by
120 // minimizing the sum of squares of relative error.
121 // - n : Vector containing the size of the benchmark tests.
122 // - time : Vector containing the times for the benchmark tests.
123 // - complexity : If different than oAuto, the fitting curve will stick to
124 // this one. If it is oAuto, it will be calculated the best
126 LeastSq
MinimalLeastSq(const std::vector
<int64_t>& n
,
127 const std::vector
<double>& time
, const BigO complexity
) {
128 CHECK_EQ(n
.size(), time
.size());
129 CHECK_GE(n
.size(), 2); // Do not compute fitting curve is less than two
130 // benchmark runs are given
131 CHECK_NE(complexity
, oNone
);
135 if (complexity
== oAuto
) {
136 std::vector
<BigO
> fit_curves
= {oLogN
, oN
, oNLogN
, oNSquared
, oNCubed
};
138 // Take o1 as default best fitting curve
139 best_fit
= MinimalLeastSq(n
, time
, FittingCurve(o1
));
140 best_fit
.complexity
= o1
;
142 // Compute all possible fitting curves and stick to the best one
143 for (const auto& fit
: fit_curves
) {
144 LeastSq current_fit
= MinimalLeastSq(n
, time
, FittingCurve(fit
));
145 if (current_fit
.rms
< best_fit
.rms
) {
146 best_fit
= current_fit
;
147 best_fit
.complexity
= fit
;
151 best_fit
= MinimalLeastSq(n
, time
, FittingCurve(complexity
));
152 best_fit
.complexity
= complexity
;
158 std::vector
<BenchmarkReporter::Run
> ComputeBigO(
159 const std::vector
<BenchmarkReporter::Run
>& reports
) {
160 typedef BenchmarkReporter::Run Run
;
161 std::vector
<Run
> results
;
163 if (reports
.size() < 2) return results
;
166 std::vector
<int64_t> n
;
167 std::vector
<double> real_time
;
168 std::vector
<double> cpu_time
;
170 // Populate the accumulators.
171 for (const Run
& run
: reports
) {
172 CHECK_GT(run
.complexity_n
, 0) << "Did you forget to call SetComplexityN?";
173 n
.push_back(run
.complexity_n
);
174 real_time
.push_back(run
.real_accumulated_time
/ run
.iterations
);
175 cpu_time
.push_back(run
.cpu_accumulated_time
/ run
.iterations
);
181 if (reports
[0].complexity
== oLambda
) {
182 result_cpu
= MinimalLeastSq(n
, cpu_time
, reports
[0].complexity_lambda
);
183 result_real
= MinimalLeastSq(n
, real_time
, reports
[0].complexity_lambda
);
185 result_cpu
= MinimalLeastSq(n
, cpu_time
, reports
[0].complexity
);
186 result_real
= MinimalLeastSq(n
, real_time
, result_cpu
.complexity
);
189 // Drop the 'args' when reporting complexity.
190 auto run_name
= reports
[0].run_name
;
191 run_name
.args
.clear();
193 // Get the data from the accumulator to BenchmarkReporter::Run's.
195 big_o
.run_name
= run_name
;
196 big_o
.run_type
= BenchmarkReporter::Run::RT_Aggregate
;
197 big_o
.repetitions
= reports
[0].repetitions
;
198 big_o
.repetition_index
= Run::no_repetition_index
;
199 big_o
.threads
= reports
[0].threads
;
200 big_o
.aggregate_name
= "BigO";
201 big_o
.report_label
= reports
[0].report_label
;
202 big_o
.iterations
= 0;
203 big_o
.real_accumulated_time
= result_real
.coef
;
204 big_o
.cpu_accumulated_time
= result_cpu
.coef
;
205 big_o
.report_big_o
= true;
206 big_o
.complexity
= result_cpu
.complexity
;
208 // All the time results are reported after being multiplied by the
209 // time unit multiplier. But since RMS is a relative quantity it
210 // should not be multiplied at all. So, here, we _divide_ it by the
211 // multiplier so that when it is multiplied later the result is the
213 double multiplier
= GetTimeUnitMultiplier(reports
[0].time_unit
);
215 // Only add label to mean/stddev if it is same for all runs
217 rms
.run_name
= run_name
;
218 rms
.run_type
= BenchmarkReporter::Run::RT_Aggregate
;
219 rms
.aggregate_name
= "RMS";
220 rms
.report_label
= big_o
.report_label
;
222 rms
.repetition_index
= Run::no_repetition_index
;
223 rms
.repetitions
= reports
[0].repetitions
;
224 rms
.threads
= reports
[0].threads
;
225 rms
.real_accumulated_time
= result_real
.rms
/ multiplier
;
226 rms
.cpu_accumulated_time
= result_cpu
.rms
/ multiplier
;
227 rms
.report_rms
= true;
228 rms
.complexity
= result_cpu
.complexity
;
229 // don't forget to keep the time unit, or we won't be able to
230 // recover the correct value.
231 rms
.time_unit
= reports
[0].time_unit
;
233 results
.push_back(big_o
);
234 results
.push_back(rms
);
}  // end namespace benchmark