]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/libs/math/example/gauss_example.cpp
import new upstream nautilus stable release 14.2.8
[ceph.git] / ceph / src / boost / libs / math / example / gauss_example.cpp
CommitLineData
b32b8144
FG
1/*
2 * Copyright John Maddock, 2017
3 * Use, modification and distribution are subject to the
4 * Boost Software License, Version 1.0. (See accompanying file
5 * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 *
 * This example illustrates numerical integration via Gauss and Gauss-Kronrod quadrature.
8 */
9
#include <cmath>
#include <iomanip>
#include <iostream>
#include <limits>
#include <boost/math/quadrature/gauss.hpp>
#include <boost/math/quadrature/gauss_kronrod.hpp>
#include <boost/math/constants/constants.hpp>
#include <boost/math/special_functions/relative_difference.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
18
19void gauss_examples()
20{
21 //[gauss_example
22
23 /*`
24 We'll begin by integrating t[super 2] atan(t) over (0,1) using a 7 term Gauss-Legendre rule,
25 and begin by defining the function to integrate as a C++ lambda expression:
26 */
27 using namespace boost::math::quadrature;
28
29 auto f = [](const double& t) { return t * t * std::atan(t); };
30
31 /*`
32 Integration is simply a matter of calling the `gauss<double, 7>::integrate` method:
33 */
34
35 double Q = gauss<double, 7>::integrate(f, 0, 1);
36
37 /*`
38 Which yields a value 0.2106572512 accurate to 1e-10.
39
40 For more accurate evaluations, we'll move to a multiprecision type and use a 20-point integration scheme:
41 */
42
43 using boost::multiprecision::cpp_bin_float_quad;
44
45 auto f2 = [](const cpp_bin_float_quad& t) { return t * t * atan(t); };
46
47 cpp_bin_float_quad Q2 = gauss<cpp_bin_float_quad, 20>::integrate(f2, 0, 1);
48
49 /*`
50 Which yields 0.2106572512258069881080923020669, which is accurate to 5e-28.
51 */
52
53 //]
54
55 std::cout << std::setprecision(18) << Q << std::endl;
56 std::cout << boost::math::relative_difference(Q, (boost::math::constants::pi<double>() - 2 + 2 * boost::math::constants::ln_two<double>()) / 12) << std::endl;
57
58 std::cout << std::setprecision(34) << Q2 << std::endl;
59 std::cout << boost::math::relative_difference(Q2, (boost::math::constants::pi<cpp_bin_float_quad>() - 2 + 2 * boost::math::constants::ln_two<cpp_bin_float_quad>()) / 12) << std::endl;
60}
61
62void gauss_kronrod_examples()
63{
64 //[gauss_kronrod_example
65
66 /*`
67 We'll begin by integrating exp(-t[super 2]/2) over (0,+[infin]) using a 7 term Gauss rule
68 and 15 term Kronrod rule,
69 and begin by defining the function to integrate as a C++ lambda expression:
70 */
71 using namespace boost::math::quadrature;
72
73 auto f1 = [](double t) { return std::exp(-t*t / 2); };
92f5a8d4 74
b32b8144
FG
75 //<-
76 double Q_expected = sqrt(boost::math::constants::half_pi<double>());
77 //->
78
79 /*`
92f5a8d4 80 We'll start off with a one shot (ie non-adaptive)
b32b8144
FG
81 integration, and keep track of the estimated error:
82 */
83 double error;
84 double Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 0, 0, &error);
85
86 /*`
87 This yields Q = 1.25348207361, which has an absolute error of 1e-4 compared to the estimated error
88 of 5e-3: this is fairly typical, with the difference between Gauss and Gauss-Kronrod schemes being
89 much higher than the actual error. Before moving on to adaptive quadrature, lets try again
90 with more points, in fact with the largest Gauss-Kronrod scheme we have cached (30/61):
91 */
92 //<-
93 std::cout << std::setprecision(16) << Q << std::endl;
94 std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
95 std::cout << fabs(Q - Q_expected) << std::endl;
96 std::cout << error << std::endl;
97 //->
98 Q = gauss_kronrod<double, 61>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 0, 0, &error);
99 //<-
100 std::cout << std::setprecision(16) << Q << std::endl;
101 std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
102 std::cout << fabs(Q - Q_expected) << std::endl;
103 std::cout << error << std::endl;
104 //->
105 /*`
106 This yields an absolute error of 3e-15 against an estimate of 1e-8, which is about as good as we're going to get
107 at double precision
108
109 However, instead of continuing with ever more points, lets switch to adaptive integration, and set the desired relative
110 error to 1e-14 against a maximum depth of 5:
111 */
112 Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 5, 1e-14, &error);
113 //<-
114 std::cout << std::setprecision(16) << Q << std::endl;
115 std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
116 std::cout << fabs(Q - Q_expected) << std::endl;
117 std::cout << error << std::endl;
118 //->
119 /*`
92f5a8d4 120 This yields an actual error of zero, against an estimate of 4e-15. In fact in this case the requested tolerance was almost
b32b8144
FG
121 certainly set too low: as we've seen above, for smooth functions, the precision achieved is often double
122 that of the estimate, so if we integrate with a tolerance of 1e-9:
123 */
124 Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 5, 1e-9, &error);
125 //<-
126 std::cout << std::setprecision(16) << Q << std::endl;
127 std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
128 std::cout << fabs(Q - Q_expected) << std::endl;
129 std::cout << error << std::endl;
130 //->
131 /*`
132 We still achieve 1e-15 precision, with an error estimate of 1e-10.
133 */
134 //]
135}
136
137int main()
138{
139 gauss_examples();
140 gauss_kronrod_examples();
141 return 0;
142}