//
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//

#ifndef BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP
#define BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP

#include <boost/beast/core/bind_handler.hpp>
#include <boost/beast/core/buffers_cat.hpp>
#include <boost/beast/core/buffers_prefix.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
#include <boost/beast/core/flat_static_buffer.hpp>
#include <boost/beast/core/type_traits.hpp>
#include <boost/beast/core/detail/clamp.hpp>
#include <boost/beast/core/detail/config.hpp>
#include <boost/beast/websocket/detail/frame.hpp>
#include <boost/asio/associated_allocator.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/handler_continuation_hook.hpp>
#include <boost/asio/handler_invoke_hook.hpp>
#include <boost/assert.hpp>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
#include <algorithm>
#include <memory>

namespace boost {
namespace beast {
namespace websocket {

namespace detail {

// Compress a buffer sequence
// Returns: `true` if more calls are needed
//
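// Consumes input from `cb` and writes the compressed output to
// `out` using the negotiated permessage-deflate stream (RFC 7692).
// On return, `out` is trimmed to span only the bytes actually
// produced and `total_in` holds the number of input bytes consumed.
//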
template<>
template<class ConstBufferSequence>
bool
stream_base<true>::
deflate(
    boost::asio::mutable_buffer& out,
    buffers_suffix<ConstBufferSequence>& cb,
    bool fin,
    std::size_t& total_in,
    error_code& ec)
{
    using boost::asio::buffer;
    BOOST_ASSERT(out.size() >= 6);
    auto& zo = this->pmd_->zo;
    zlib::z_params zs;
    zs.avail_in = 0;
    zs.next_in = nullptr;
    zs.avail_out = out.size();
    zs.next_out = out.data();
    for(auto in : beast::detail::buffers_range(cb))
    {
        zs.avail_in = in.size();
        if(zs.avail_in == 0)
            continue;
        zs.next_in = in.data();
        zo.write(zs, zlib::Flush::none, ec);
        if(ec)
        {
            if(ec != zlib::error::need_buffers)
                return false;
            BOOST_ASSERT(zs.avail_out == 0);
            BOOST_ASSERT(zs.total_out == out.size());
            ec.assign(0, ec.category());
            break;
        }
        if(zs.avail_out == 0)
        {
            BOOST_ASSERT(zs.total_out == out.size());
            break;
        }
        BOOST_ASSERT(zs.avail_in == 0);
    }
    total_in = zs.total_in;
    cb.consume(zs.total_in);
    if(zs.avail_out > 0 && fin)
    {
        auto const remain = boost::asio::buffer_size(cb);
        if(remain == 0)
        {
            // Inspired by Mark Adler
            // https://github.com/madler/zlib/issues/149
            //
            // VFALCO We could do this flush twice depending
            //        on how much space is in the output.
            zo.write(zs, zlib::Flush::block, ec);
            BOOST_ASSERT(! ec || ec == zlib::error::need_buffers);
            if(ec == zlib::error::need_buffers)
                ec.assign(0, ec.category());
            if(ec)
                return false;
            if(zs.avail_out >= 6)
            {
                zo.write(zs, zlib::Flush::full, ec);
                BOOST_ASSERT(! ec);
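                // A full flush terminates the deflate output
                // with an empty stored block whose last four
                // octets are 00 00 FF FF; permessage-deflate
                // (RFC 7692) requires that these four octets
                // not be transmitted on the wire.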
                // remove flush marker
                zs.total_out -= 4;
                out = buffer(out.data(), zs.total_out);
                return false;
            }
        }
    }
    ec.assign(0, ec.category());
    out = buffer(out.data(), zs.total_out);
    return true;
}

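// When no_context_takeover was negotiated for our role, the deflate
// stream is reset after each complete message so that no message
// refers to the compression history of an earlier one.
//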
template<>
inline
void
stream_base<true>::
do_context_takeover_write(role_type role)
{
    if((role == role_type::client &&
        this->pmd_config_.client_no_context_takeover) ||
       (role == role_type::server &&
        this->pmd_config_.server_no_context_takeover))
    {
        this->pmd_->zo.reset();
    }
}

} // detail

//------------------------------------------------------------------------------

template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
class stream<NextLayer, deflateSupported>::write_some_op
    : public boost::asio::coroutine
{
    Handler h_;
    stream<NextLayer, deflateSupported>& ws_;
    buffers_suffix<Buffers> cb_;
    detail::frame_header fh_;
    detail::prepared_key key_;
    std::size_t bytes_transferred_ = 0;
    std::size_t remain_;
    std::size_t in_;
    int how_;
    bool fin_;
    bool more_;
    bool cont_ = false;

public:
    static constexpr int id = 2; // for soft_mutex

    write_some_op(write_some_op&&) = default;
    write_some_op(write_some_op const&) = delete;

    template<class DeducedHandler>
    write_some_op(
        DeducedHandler&& h,
        stream<NextLayer, deflateSupported>& ws,
        bool fin,
        Buffers const& bs)
        : h_(std::forward<DeducedHandler>(h))
        , ws_(ws)
        , cb_(bs)
        , fin_(fin)
    {
    }

    using allocator_type =
        boost::asio::associated_allocator_t<Handler>;

    allocator_type
    get_allocator() const noexcept
    {
        return (boost::asio::get_associated_allocator)(h_);
    }

    using executor_type = boost::asio::associated_executor_t<
        Handler, decltype(std::declval<stream<NextLayer, deflateSupported>&>().get_executor())>;

    executor_type
    get_executor() const noexcept
    {
        return (boost::asio::get_associated_executor)(
            h_, ws_.get_executor());
    }

    Handler&
    handler()
    {
        return h_;
    }

    void operator()(
        error_code ec = {},
        std::size_t bytes_transferred = 0,
        bool cont = true);

    friend
    bool asio_handler_is_continuation(write_some_op* op)
    {
        using boost::asio::asio_handler_is_continuation;
        return op->cont_ || asio_handler_is_continuation(
            std::addressof(op->h_));
    }

    template<class Function>
    friend
    void asio_handler_invoke(Function&& f, write_some_op* op)
    {
        using boost::asio::asio_handler_invoke;
        asio_handler_invoke(
            f, std::addressof(op->h_));
    }
};

template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
void
stream<NextLayer, deflateSupported>::
write_some_op<Buffers, Handler>::
operator()(
    error_code ec,
    std::size_t bytes_transferred,
    bool cont)
{
    using beast::detail::clamp;
    using boost::asio::buffer;
    using boost::asio::buffer_copy;
    using boost::asio::buffer_size;
    using boost::asio::mutable_buffer;
    enum
    {
        do_nomask_nofrag,
        do_nomask_frag,
        do_mask_nofrag,
        do_mask_frag,
        do_deflate
    };
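    // One algorithm is selected up front: unmasked (server role)
    // or masked (client role), each either as a single frame or
    // with automatic fragmentation, plus a deflate path when
    // compression is active.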
    std::size_t n;
    boost::asio::mutable_buffer b;
    cont_ = cont;
    BOOST_ASIO_CORO_REENTER(*this)
    {
        // Set up the outgoing frame header
        if(! ws_.wr_cont_)
        {
            ws_.begin_msg();
            fh_.rsv1 = ws_.wr_compress_;
        }
        else
        {
            fh_.rsv1 = false;
        }
        fh_.rsv2 = false;
        fh_.rsv3 = false;
        fh_.op = ws_.wr_cont_ ?
            detail::opcode::cont : ws_.wr_opcode_;
        fh_.mask =
            ws_.role_ == role_type::client;

        // Choose a write algorithm
        if(ws_.wr_compress_)
        {
            how_ = do_deflate;
        }
        else if(! fh_.mask)
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_nomask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_nomask_frag;
                else
                    how_ = do_nomask_nofrag;
            }
        }
        else
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_mask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_mask_frag;
                else
                    how_ = do_mask_nofrag;
            }
        }

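        // Only one operation at a time may hold the write block;
        // the soft mutex wr_block_ serializes frame writers. If
        // another operation owns it, this operation parks itself
        // in paused_wr_ and resumes when the block is released.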
        // Maybe suspend
        if(ws_.wr_block_.try_lock(this))
        {
            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }
        else
        {
        do_suspend:
            // Suspend
            BOOST_ASIO_CORO_YIELD
            ws_.paused_wr_.emplace(std::move(*this));

            // Acquire the write block
            ws_.wr_block_.lock(this);

            // Resume
            BOOST_ASIO_CORO_YIELD
            boost::asio::post(
                ws_.get_executor(), std::move(*this));
            BOOST_ASSERT(ws_.wr_block_.is_locked(this));

            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }

        //------------------------------------------------------------------

        if(how_ == do_nomask_nofrag)
        {
            fh_.fin = fin_;
            fh_.len = buffer_size(cb_);
            ws_.wr_fb_.reset();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            ws_.wr_cont_ = ! fin_;
            // Send frame
            BOOST_ASIO_CORO_YIELD
            boost::asio::async_write(ws_.stream_,
                buffers_cat(ws_.wr_fb_.data(), cb_),
                std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            bytes_transferred_ += clamp(fh_.len);
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_nomask_frag)
        {
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                fh_.len = n;
                remain_ -= n;
                fh_.fin = fin_ ? remain_ == 0 : false;
                ws_.wr_fb_.reset();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(
                    ws_.stream_, buffers_cat(
                        ws_.wr_fb_.data(), buffers_prefix(
                            clamp(fh_.len), cb_)),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = clamp(fh_.len); // locals are not preserved across the yield
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;
                // Allow outgoing control frames to
                // be sent in between message frames
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_nofrag)
        {
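            // Frames sent by a client must be masked with a
            // fresh key (RFC 6455 section 5.3). The payload is
            // copied into the write buffer and masked there,
            // leaving the caller's buffers untouched.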
            remain_ = buffer_size(cb_);
            fh_.fin = fin_;
            fh_.len = remain_;
            fh_.key = ws_.wr_gen_();
            detail::prepare_key(key_, fh_.key);
            ws_.wr_fb_.reset();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            n = clamp(remain_, ws_.wr_buf_size_);
            buffer_copy(buffer(
                ws_.wr_buf_.get(), n), cb_);
            detail::mask_inplace(buffer(
                ws_.wr_buf_.get(), n), key_);
            remain_ -= n;
            ws_.wr_cont_ = ! fin_;
            // Send frame header and partial payload
            BOOST_ASIO_CORO_YIELD
            boost::asio::async_write(
                ws_.stream_, buffers_cat(ws_.wr_fb_.data(),
                    buffer(ws_.wr_buf_.get(), n)),
                std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            bytes_transferred_ +=
                bytes_transferred - ws_.wr_fb_.size();
            while(remain_ > 0)
            {
                cb_.consume(ws_.wr_buf_size_);
                n = clamp(remain_, ws_.wr_buf_size_);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                remain_ -= n;
                // Send partial payload
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffer(ws_.wr_buf_.get(), n),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                bytes_transferred_ += bytes_transferred;
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_frag)
        {
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                remain_ -= n;
                fh_.len = n;
                fh_.key = ws_.wr_gen_();
                fh_.fin = fin_ ? remain_ == 0 : false;
                detail::prepare_key(key_, fh_.key);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                ws_.wr_fb_.reset();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(),
                        buffer(ws_.wr_buf_.get(), n)),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = bytes_transferred - ws_.wr_fb_.size();
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;
                // Allow outgoing control frames to
                // be sent in between message frames:
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_deflate)
        {
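            // RSV1 is set only on the first frame of a compressed
            // message (RFC 7692); continuation frames clear it but
            // still carry compressed data.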
            for(;;)
            {
                b = buffer(ws_.wr_buf_.get(),
                    ws_.wr_buf_size_);
                more_ = ws_.deflate(b, cb_, fin_, in_, ec);
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = buffer_size(b);
                if(n == 0)
                {
                    // The input was consumed, but there
                    // is no output due to compression
                    // latency.
                    BOOST_ASSERT(! fin_);
                    BOOST_ASSERT(buffer_size(cb_) == 0);
                    goto upcall;
                }
                if(fh_.mask)
                {
                    fh_.key = ws_.wr_gen_();
                    detail::prepared_key key;
                    detail::prepare_key(key, fh_.key);
                    detail::mask_inplace(b, key);
                }
                fh_.fin = ! more_;
                fh_.len = n;
                ws_.wr_fb_.reset();
                detail::write<
                    flat_static_buffer_base>(ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(), b),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                bytes_transferred_ += in_;
                if(more_)
                {
                    fh_.op = detail::opcode::cont;
                    fh_.rsv1 = false;
                    // Allow outgoing control frames to
                    // be sent in between message frames:
                    ws_.wr_block_.unlock(this);
                    if( ws_.paused_close_.maybe_invoke() ||
                        ws_.paused_rd_.maybe_invoke() ||
                        ws_.paused_ping_.maybe_invoke())
                    {
                        BOOST_ASSERT(ws_.wr_block_.is_locked());
                        goto do_suspend;
                    }
                    ws_.wr_block_.lock(this);
                }
                else
                {
                    if(fh_.fin)
                        ws_.do_context_takeover_write(ws_.role_);
                    goto upcall;
                }
            }
        }

        //--------------------------------------------------------------------------

    upcall:
        ws_.wr_block_.unlock(this);
        ws_.paused_close_.maybe_invoke() ||
            ws_.paused_rd_.maybe_invoke() ||
            ws_.paused_ping_.maybe_invoke();
        if(! cont_)
            return boost::asio::post(
                ws_.stream_.get_executor(),
                bind_handler(std::move(h_), ec, bytes_transferred_));
        h_(ec, bytes_transferred_);
    }
}

//------------------------------------------------------------------------------

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin, ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const bytes_transferred =
        write_some(fin, buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin,
    ConstBufferSequence const& buffers, error_code& ec)
{
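    // Synchronous counterpart of write_some_op: the same header
    // setup and algorithm selection, performed inline with
    // blocking writes on the next layer.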
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    using beast::detail::clamp;
    using boost::asio::buffer;
    using boost::asio::buffer_copy;
    using boost::asio::buffer_size;
    std::size_t bytes_transferred = 0;
    ec.assign(0, ec.category());
    // Make sure the stream is open
    if(! check_open(ec))
        return bytes_transferred;
    detail::frame_header fh;
    if(! wr_cont_)
    {
        begin_msg();
        fh.rsv1 = wr_compress_;
    }
    else
    {
        fh.rsv1 = false;
    }
    fh.rsv2 = false;
    fh.rsv3 = false;
    fh.op = wr_cont_ ?
        detail::opcode::cont : wr_opcode_;
    fh.mask = role_ == role_type::client;
    auto remain = buffer_size(buffers);
    if(wr_compress_)
    {
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            auto b = buffer(
                wr_buf_.get(), wr_buf_size_);
            auto const more = this->deflate(
                b, cb, fin, bytes_transferred, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            auto const n = buffer_size(b);
            if(n == 0)
            {
                // The input was consumed, but there
                // is no output due to compression
                // latency.
                BOOST_ASSERT(! fin);
                BOOST_ASSERT(buffer_size(cb) == 0);
                fh.fin = false;
                break;
            }
            if(fh.mask)
            {
                fh.key = wr_gen_();
                detail::prepared_key key;
                detail::prepare_key(key, fh.key);
                detail::mask_inplace(b, key);
            }
            fh.fin = ! more;
            fh.len = n;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            if(! more)
                break;
            fh.op = detail::opcode::cont;
            fh.rsv1 = false;
        }
        if(fh.fin)
            this->do_context_takeover_write(role_);
    }
    else if(! fh.mask)
    {
        if(! wr_frag_)
        {
            // no mask, no autofrag
            fh.fin = fin;
            fh.len = remain;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), buffers), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += remain;
        }
        else
        {
            // no mask, autofrag
            BOOST_ASSERT(wr_buf_size_ != 0);
            buffers_suffix<
                ConstBufferSequence> cb{buffers};
            for(;;)
            {
                auto const n = clamp(remain, wr_buf_size_);
                remain -= n;
                fh.len = n;
                fh.fin = fin ? remain == 0 : false;
                detail::fh_buffer fh_buf;
                detail::write<
                    flat_static_buffer_base>(fh_buf, fh);
                wr_cont_ = ! fin;
                boost::asio::write(stream_,
                    buffers_cat(fh_buf.data(),
                        buffers_prefix(n, cb)), ec);
                if(! check_ok(ec))
                    return bytes_transferred;
                bytes_transferred += n;
                if(remain == 0)
                    break;
                fh.op = detail::opcode::cont;
                cb.consume(n);
            }
        }
    }
    else if(! wr_frag_)
    {
        // mask, no autofrag
        fh.fin = fin;
        fh.len = remain;
        fh.key = wr_gen_();
        detail::prepared_key key;
        detail::prepare_key(key, fh.key);
        detail::fh_buffer fh_buf;
        detail::write<
            flat_static_buffer_base>(fh_buf, fh);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            wr_cont_ = ! fin;
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
        while(remain > 0)
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            boost::asio::write(stream_, b, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
    }
    else
    {
        // mask, autofrag
        BOOST_ASSERT(wr_buf_size_ != 0);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            fh.key = wr_gen_();
            detail::prepared_key key;
            detail::prepare_key(key, fh.key);
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            detail::mask_inplace(b, key);
            fh.len = n;
            remain -= n;
            fh.fin = fin ? remain == 0 : false;
            wr_cont_ = ! fh.fin;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
            if(remain == 0)
                break;
            fh.op = detail::opcode::cont;
            cb.consume(n);
        }
    }
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write_some(bool fin,
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, fin, bs}(
                {}, 0, false);
    return init.result.get();
}

//------------------------------------------------------------------------------

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const bytes_transferred = write(buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    return write_some(true, buffers, ec);
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write(
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, true, bs}(
                {}, 0, false);
    return init.result.get();
}
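
// A minimal usage sketch, assuming `ws` is a
// stream<boost::asio::ip::tcp::socket> that has already completed
// the WebSocket handshake and `text` is a std::string which stays
// alive for the duration of the operation:
//
//     ws.text(true);
//     ws.write(boost::asio::buffer(text));    // send one whole message
//
//     ws.async_write(boost::asio::buffer(text),
//         [](error_code ec, std::size_t bytes_transferred)
//         {
//             // bytes_transferred counts the payload octets consumed
//         });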

} // websocket
} // beast
} // boost

#endif