]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/libs/beast/test/bench/wsload/wsload.cpp
import new upstream nautilus stable release 14.2.8
[ceph.git] / ceph / src / boost / libs / beast / test / bench / wsload / wsload.cpp
CommitLineData
b32b8144 1//
92f5a8d4 2// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
b32b8144
FG
3//
4// Distributed under the Boost Software License, Version 1.0. (See accompanying
5// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6//
7// Official repository: https://github.com/boostorg/beast
8//
9
10//------------------------------------------------------------------------------
11//
12// wsload
13//
14// Measure the performance of a WebSocket server
15//
16//------------------------------------------------------------------------------
17
b32b8144
FG
18#include <boost/beast/core.hpp>
19#include <boost/beast/websocket.hpp>
92f5a8d4 20#include <boost/beast/_experimental/unit_test/dstream.hpp>
b32b8144
FG
21#include <boost/asio.hpp>
22#include <atomic>
23#include <chrono>
24#include <cstdlib>
25#include <functional>
26#include <iostream>
27#include <memory>
28#include <mutex>
29#include <random>
30#include <thread>
31#include <vector>
32
92f5a8d4
TL
// Short aliases for the Boost.Beast / Boost.Asio namespaces used
// throughout this benchmark.
// NOTE(review): the `http` alias appears unused in this file — confirm
// before removing.
namespace beast = boost::beast;                 // from <boost/beast.hpp>
namespace http = beast::http;                   // from <boost/beast/http.hpp>
namespace websocket = beast::websocket;         // from <boost/beast/websocket.hpp>
namespace net = boost::asio;                    // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp;               // from <boost/asio/ip/tcp.hpp>
b32b8144
FG
38
39class test_buffer
40{
41 char data_[4096];
92f5a8d4 42 net::const_buffer b_;
b32b8144
FG
43
44public:
45 using const_iterator =
92f5a8d4 46 net::const_buffer const*;
b32b8144 47
92f5a8d4 48 using value_type = net::const_buffer;
b32b8144
FG
49
50 test_buffer()
51 : b_(data_, sizeof(data_))
52 {
53 std::mt19937_64 rng;
54 std::uniform_int_distribution<unsigned short> dist;
55 for(auto& c : data_)
56 c = static_cast<unsigned char>(dist(rng));
57 }
58
59 const_iterator
60 begin() const
61 {
62 return &b_;
63 }
64
65 const_iterator
66 end() const
67 {
68 return begin() + 1;
69 }
70};
71
// Thread-safe accumulator for benchmark totals. Worker connections
// call insert() from their destructors — potentially from different
// io_context threads — and main() reads the sums afterwards.
class report
{
    // mutable so the const getters can lock; guards both counters.
    // (Previously the getters read the counters without the lock,
    // which is a data race if called while workers are still active.)
    mutable std::mutex m_;
    std::size_t bytes_ = 0;
    std::size_t messages_ = 0;

public:
    // Fold one worker's totals into the running sums.
    void
    insert(std::size_t messages, std::size_t bytes)
    {
        std::lock_guard<std::mutex> lock(m_);
        bytes_ += bytes;
        messages_ += messages;
    }

    // Total bytes received across all workers.
    std::size_t
    bytes() const
    {
        std::lock_guard<std::mutex> lock(m_);
        return bytes_;
    }

    // Total messages received across all workers.
    std::size_t
    messages() const
    {
        std::lock_guard<std::mutex> lock(m_);
        return messages_;
    }
};
99
100void
92f5a8d4 101fail(beast::error_code ec, char const* what)
b32b8144
FG
102{
103 std::cerr << what << ": " << ec.message() << "\n";
104}
105
// One benchmark client connection. After connect + WebSocket handshake
// it loops: write a randomly sized binary message, read the server's
// reply, count it — until `messages_` round trips are done, then close.
// Lifetime is managed by shared_from_this() bound into each handler;
// the destructor (run when the last handler releases its reference)
// flushes this connection's totals into the shared report.
class connection
    : public std::enable_shared_from_this<connection>
{
    websocket::stream<tcp::socket> ws_;     // WebSocket over a plain TCP socket
    tcp::endpoint ep_;                      // server endpoint to connect to
    std::size_t messages_;                  // remaining write/read round trips
    report& rep_;                           // shared sink for final counters
    test_buffer const& tb_;                 // shared read-only payload source
    // NOTE(review): this strand is constructed but never used to wrap
    // the completion handlers below, so with multiple threads running
    // the io_context the handlers are not serialized through it —
    // confirm whether that is intentional for this benchmark.
    net::strand<
        net::io_context::executor_type> strand_;
    beast::flat_buffer buffer_;             // holds the most recent read message
    std::mt19937_64 rng_;                   // drives the random message-size draw
    std::size_t count_ = 0;                 // messages successfully read back
    std::size_t bytes_ = 0;                 // total bytes read back

public:
    // Configure the stream before any I/O: optional permessage-deflate,
    // binary frames, no auto-fragmentation, 64 KiB write buffer.
    connection(
        net::io_context& ioc,
        tcp::endpoint const& ep,
        std::size_t messages,
        bool deflate,
        report& rep,
        test_buffer const& tb)
        : ws_(ioc)
        , ep_(ep)
        , messages_(messages)
        , rep_(rep)
        , tb_(tb)
        , strand_(ioc.get_executor())
    {
        websocket::permessage_deflate pmd;
        pmd.client_enable = deflate;
        ws_.set_option(pmd);
        ws_.binary(true);
        ws_.auto_fragment(false);
        ws_.write_buffer_bytes(64 * 1024);
    }

    // Publish this connection's totals once all handlers are done.
    ~connection()
    {
        rep_.insert(count_, bytes_);
    }

    // Start the async chain: TCP connect -> handshake -> write loop.
    void
    run()
    {
        ws_.next_layer().async_connect(ep_,
            beast::bind_front_handler(
                &connection::on_connect,
                this->shared_from_this()));
    }

private:
    void
    on_connect(beast::error_code ec)
    {
        if(ec)
            return fail(ec, "on_connect");

        // Host header value is "address:port"; request target is "/".
        ws_.async_handshake(
            ep_.address().to_string() + ":" + std::to_string(ep_.port()),
            "/",
            beast::bind_front_handler(
                &connection::on_handshake,
                this->shared_from_this()));
    }

    void
    on_handshake(beast::error_code ec)
    {
        if(ec)
            return fail(ec, "handshake");

        do_write();
    }

    // Write one complete message (fin == true) whose size is drawn from
    // a geometric distribution with mean buffer_bytes(tb_) / 4.
    void
    do_write()
    {
        std::geometric_distribution<std::size_t> dist{
            double(4) / beast::buffer_bytes(tb_)};
        ws_.async_write_some(true,
            beast::buffers_prefix(dist(rng_), tb_),
            beast::bind_front_handler(
                &connection::on_write,
                this->shared_from_this()));
    }

    void
    on_write(beast::error_code ec, std::size_t)
    {
        if(ec)
            return fail(ec, "write");

        // Post-decrement: keep reading while messages_ was nonzero.
        // NOTE(review): when messages_ hits 0 the decrement wraps the
        // unsigned counter; harmless here since the close path follows
        // and messages_ is never read again — but worth confirming.
        if(messages_--)
            return do_read();

        ws_.async_close({},
            beast::bind_front_handler(
                &connection::on_close,
                this->shared_from_this()));
    }

    // Read the server's reply into buffer_.
    void
    do_read()
    {
        ws_.async_read(buffer_,
            beast::bind_front_handler(
                &connection::on_read,
                this->shared_from_this()));
    }

    void
    on_read(beast::error_code ec, std::size_t)
    {
        if(ec)
            return fail(ec, "read");

        // Tally the echoed message, recycle the buffer, continue loop.
        ++count_;
        bytes_ += buffer_.size();
        buffer_.consume(buffer_.size());
        do_write();
    }

    void
    on_close(beast::error_code ec)
    {
        if(ec)
            return fail(ec, "close");
    }
};
237
// Simple stopwatch: records the construction instant and reports the
// time elapsed since then.
class timer
{
    // steady_clock is monotonic, so elapsed() can never go backwards.
    // (The original used system_clock, which can jump under NTP/DST
    // adjustments and is the wrong tool for interval measurement.)
    using clock_type =
        std::chrono::steady_clock;

    clock_type::time_point when_;

public:
    using duration =
        clock_type::duration;

    timer()
        : when_(clock_type::now())
    {
    }

    // Time elapsed since construction.
    duration
    elapsed() const
    {
        return clock_type::now() - when_;
    }
};
260
// Convert an elapsed time and an item count into items per second.
// Returns 0 for degenerate measurements (zero/negative elapsed time or
// zero items) instead of invoking undefined behavior: the original
// 1 / (elapsed/items).count() produced inf for elapsed == 0, and
// casting a non-finite double to an integer is UB.
inline
std::uint64_t
throughput(
    std::chrono::duration<double> const& elapsed,
    std::uint64_t items)
{
    auto const seconds = elapsed.count();
    if(seconds <= 0 || items == 0)
        return 0;
    return static_cast<std::uint64_t>(items / seconds);
}
271
272int
273main(int argc, char** argv)
274{
92f5a8d4 275 beast::unit_test::dstream dout(std::cerr);
b32b8144
FG
276
277 try
278 {
279 // Check command line arguments.
280 if(argc != 8)
281 {
282 std::cerr <<
283 "Usage: bench-wsload <address> <port> <trials> <messages> <workers> <threads> <compression:0|1>";
284 return EXIT_FAILURE;
285 }
286
92f5a8d4 287 auto const address = net::ip::make_address(argv[1]);
b32b8144
FG
288 auto const port = static_cast<unsigned short>(std::atoi(argv[2]));
289 auto const trials = static_cast<std::size_t>(std::atoi(argv[3]));
290 auto const messages= static_cast<std::size_t>(std::atoi(argv[4]));
291 auto const workers = static_cast<std::size_t>(std::atoi(argv[5]));
292 auto const threads = static_cast<std::size_t>(std::atoi(argv[6]));
293 auto const deflate = std::atoi(argv[7]) != 0;
294 auto const work = (messages + workers - 1) / workers;
295 test_buffer tb;
296 for(auto i = trials; i != 0; --i)
297 {
298 report rep;
92f5a8d4 299 net::io_context ioc{1};
b32b8144
FG
300 for(auto j = workers; j; --j)
301 {
302 auto sp =
303 std::make_shared<connection>(
304 ioc,
305 tcp::endpoint{address, port},
306 work,
307 deflate,
308 rep,
309 tb);
310 sp->run();
311 }
312 timer clock;
313 std::vector<std::thread> tv;
314 if(threads > 1)
315 {
316 tv.reserve(threads);
317 tv.emplace_back([&ioc]{ ioc.run(); });
318 }
319 ioc.run();
320 for(auto& t : tv)
321 t.join();
322 auto const elapsed = clock.elapsed();
323 dout <<
324 throughput(elapsed, rep.bytes()) << " bytes/s in " <<
325 (std::chrono::duration_cast<
326 std::chrono::milliseconds>(
327 elapsed).count() / 1000.) << "ms and " <<
328 rep.bytes() << " bytes" << std::endl;
329 }
330 }
331 catch(std::exception const& e)
332 {
333 std::cerr << "Error: " << e.what() << std::endl;
334 return EXIT_FAILURE;
335 }
336
337 return EXIT_SUCCESS;
338}