// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013 Inktank Storage, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef ECBACKEND_H
#define ECBACKEND_H

#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>

#include "OSD.h"
#include "PGBackend.h"
#include "erasure-code/ErasureCodeInterface.h"
#include "ECUtil.h"
#include "ECTransaction.h"
#include "ExtentCache.h"

// forward declarations
struct ECSubWrite;
struct ECSubWriteReply;
struct ECSubRead;
struct ECSubReadReply;

struct RecoveryMessages;
class ECBackend : public PGBackend {
public:
  RecoveryHandle *open_recovery_op() override;

  void run_recovery_op(
    RecoveryHandle *h,
    int priority
    ) override;

  int recover_object(
    const hobject_t &hoid,
    eversion_t v,
    ObjectContextRef head,
    ObjectContextRef obc,
    RecoveryHandle *h
    ) override;

  bool _handle_message(
    OpRequestRef op
    ) override;
  bool can_handle_while_inactive(
    OpRequestRef op
    ) override;
  friend struct SubWriteApplied;
  friend struct SubWriteCommitted;
  void sub_write_applied(
    ceph_tid_t tid,
    eversion_t version,
    const ZTracer::Trace &trace);
  void sub_write_committed(
    ceph_tid_t tid,
    eversion_t version,
    eversion_t last_complete,
    const ZTracer::Trace &trace);
  void handle_sub_write(
    pg_shard_t from,
    OpRequestRef msg,
    ECSubWrite &op,
    const ZTracer::Trace &trace,
    Context *on_local_applied_sync = 0
    );
  void handle_sub_read(
    pg_shard_t from,
    const ECSubRead &op,
    ECSubReadReply *reply,
    const ZTracer::Trace &trace
    );
  void handle_sub_write_reply(
    pg_shard_t from,
    const ECSubWriteReply &op,
    const ZTracer::Trace &trace
    );
  void handle_sub_read_reply(
    pg_shard_t from,
    ECSubReadReply &op,
    RecoveryMessages *m,
    const ZTracer::Trace &trace
    );

  /// @see ReadOp below
  void check_recovery_sources(const OSDMapRef& osdmap) override;

  void on_change() override;
  void clear_recovery_state() override;

  void on_flushed() override;

  void dump_recovery_info(Formatter *f) const override;

  void call_write_ordered(std::function<void(void)> &&cb) override;

  void submit_transaction(
    const hobject_t &hoid,
    const object_stat_sum_t &delta_stats,
    const eversion_t &at_version,
    PGTransactionUPtr &&t,
    const eversion_t &trim_to,
    const eversion_t &roll_forward_to,
    const vector<pg_log_entry_t> &log_entries,
    boost::optional<pg_hit_set_history_t> &hset_history,
    Context *on_local_applied_sync,
    Context *on_all_applied,
    Context *on_all_commit,
    ceph_tid_t tid,
    osd_reqid_t reqid,
    OpRequestRef op
    ) override;

  int objects_read_sync(
    const hobject_t &hoid,
    uint64_t off,
    uint64_t len,
    uint32_t op_flags,
    bufferlist *bl) override;

  /**
   * Async read mechanism
   *
   * Async reads use the same async read mechanism as does recovery.
   * CallClientContexts is responsible for reconstructing the response
   * buffer as well as for calling the callbacks.
   *
   * One tricky bit is that two reads may not read from the same set of
   * replicas.  This could result in two reads completing in the wrong
   * (from the interface user's point of view) order.  Thus, we maintain
   * a queue of in-progress reads (@see in_progress_client_reads) to
   * ensure that we always call the completion callback in order.
   *
   * Another subtlety is that while we may read a degraded object, we will
   * still only perform a client read from shards in the acting set.  This
   * ensures that we won't ever have to restart a client-initiated read in
   * check_recovery_sources.
   */
  void objects_read_and_reconstruct(
    const map<hobject_t, std::list<boost::tuple<uint64_t, uint64_t, uint32_t> >
    > &reads,
    bool fast_read,
    GenContextURef<map<hobject_t,pair<int, extent_map> > &&> &&func);

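  /*
   * Illustrative sketch only (hypothetical caller, not part of the
   * interface): building the reads map and consuming the reconstructed
   * extents once every object has completed.  The lambda helper mirrors
   * the one used by objects_read_async_no_cache() below.
   *
   *   map<hobject_t, std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > > reads;
   *   reads[hoid].push_back(boost::make_tuple(0, 4096, 0)); // off, len, op_flags
   *   objects_read_and_reconstruct(
   *     reads,
   *     false, // fast_read
   *     make_gen_lambda_context<map<hobject_t, pair<int, extent_map> > &&>(
   *       [](map<hobject_t, pair<int, extent_map> > &&results) {
   *         // results[hoid].first is an error code,
   *         // results[hoid].second holds the reconstructed extent_map
   *       }));
   */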
  friend struct CallClientContexts;
  struct ClientAsyncReadStatus {
    unsigned objects_to_read;
    GenContextURef<map<hobject_t,pair<int, extent_map> > &&> func;
    map<hobject_t,pair<int, extent_map> > results;
    explicit ClientAsyncReadStatus(
      unsigned objects_to_read,
      GenContextURef<map<hobject_t,pair<int, extent_map> > &&> &&func)
      : objects_to_read(objects_to_read), func(std::move(func)) {}
    void complete_object(
      const hobject_t &hoid,
      int err,
      extent_map &&buffers) {
      assert(objects_to_read);
      --objects_to_read;
      assert(!results.count(hoid));
      results.emplace(hoid, make_pair(err, std::move(buffers)));
    }
    bool is_complete() const {
      return objects_to_read == 0;
    }
    void run() {
      func.release()->complete(std::move(results));
    }
  };
  list<ClientAsyncReadStatus> in_progress_client_reads;
  void objects_read_async(
    const hobject_t &hoid,
    const list<pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
                    pair<bufferlist*, Context*> > > &to_read,
    Context *on_complete,
    bool fast_read = false) override;

  template <typename Func>
  void objects_read_async_no_cache(
    const map<hobject_t,extent_set> &to_read,
    Func &&on_complete) {
    map<hobject_t,std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > > _to_read;
    for (auto &&hpair: to_read) {
      auto &l = _to_read[hpair.first];
      for (auto extent: hpair.second) {
        l.emplace_back(extent.first, extent.second, 0);
      }
    }
    objects_read_and_reconstruct(
      _to_read,
      false,
      make_gen_lambda_context<
        map<hobject_t,pair<int, extent_map> > &&, Func>(
          std::forward<Func>(on_complete)));
  }
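  /*
   * Illustrative sketch only (hypothetical caller): because the no-cache
   * variant accepts any callable, a lambda can consume the reconstructed
   * extent maps directly.
   *
   *   map<hobject_t, extent_set> wanted;
   *   wanted[hoid].insert(0, 8192);  // logical offset, length
   *   objects_read_async_no_cache(
   *     wanted,
   *     [](map<hobject_t, pair<int, extent_map> > &&results) {
   *       // each entry carries an error code and the reconstructed extents
   *     });
   */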
  void kick_reads() {
    while (in_progress_client_reads.size() &&
           in_progress_client_reads.front().is_complete()) {
      in_progress_client_reads.front().run();
      in_progress_client_reads.pop_front();
    }
  }

private:
  friend struct ECRecoveryHandle;
  uint64_t get_recovery_chunk_size() const {
    return ROUND_UP_TO(cct->_conf->osd_recovery_max_chunk,
                       sinfo.get_stripe_width());
  }
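  /*
   * For illustration (hypothetical numbers): with osd_recovery_max_chunk =
   * 8388608 (8 MiB) and a stripe width of 24576 (six 4 KiB data chunks),
   * ROUND_UP_TO yields 8404992, the next multiple of the stripe width, so
   * recovery always proceeds in whole stripes.
   */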

  void get_want_to_read_shards(set<int> *want_to_read) const {
    const vector<int> &chunk_mapping = ec_impl->get_chunk_mapping();
    for (int i = 0; i < (int)ec_impl->get_data_chunk_count(); ++i) {
      int chunk = (int)chunk_mapping.size() > i ? chunk_mapping[i] : i;
      want_to_read->insert(chunk);
    }
  }
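  /*
   * For illustration (hypothetical layout): with 4 data chunks, 2 coding
   * chunks, and a chunk_mapping of [1, 2, 3, 4, 0, 5], the data for
   * logical chunks 0..3 lives on shards {1, 2, 3, 4}, so those are the
   * shards wanted for a client read; with an empty mapping the identity
   * {0, 1, 2, 3} is used.
   */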

  /**
   * Recovery
   *
   * Recovery uses the same underlying read mechanism as client reads
   * with the slight difference that recovery reads may come from non
   * acting shards.  Thus, check_recovery_sources may wind up calling
   * cancel_pull for a read originating with RecoveryOp.
   *
   * The recovery process is expressed as a state machine:
   * - IDLE: Nothing is currently in progress, reads will be started and
   *   we will transition to READING
   * - READING: We are awaiting a pending read op.  Once complete, we will
   *   decode the buffers and proceed to WRITING
   * - WRITING: We are awaiting a completed push.  Once complete, we will
   *   either transition to COMPLETE or to IDLE to continue.
   * - COMPLETE: complete
   *
   * We use the existing Push and PushReply messages and structures to
   * handle actually shuffling the data over to the replicas.  recovery_info
   * and recovery_progress are expressed in terms of the logical offset
   * space except for data_included which is in terms of the chunked object
   * space (to match the passed buffer).
   *
   * xattrs are requested on the first read and used to initialize the
   * object_context if missing on completion of the first read.
   *
   * In order to batch up reads and writes, we batch Push, PushReply,
   * Transaction, and reads in a RecoveryMessages object which is passed
   * among the recovery methods.
   */
  struct RecoveryOp {
    hobject_t hoid;
    eversion_t v;
    set<pg_shard_t> missing_on;
    set<shard_id_t> missing_on_shards;

    ObjectRecoveryInfo recovery_info;
    ObjectRecoveryProgress recovery_progress;

    enum state_t { IDLE, READING, WRITING, COMPLETE } state;

    static const char* tostr(state_t state) {
      switch (state) {
      case ECBackend::RecoveryOp::IDLE:
        return "IDLE";
      case ECBackend::RecoveryOp::READING:
        return "READING";
      case ECBackend::RecoveryOp::WRITING:
        return "WRITING";
      case ECBackend::RecoveryOp::COMPLETE:
        return "COMPLETE";
      default:
        ceph_abort();
        return "";
      }
    }

    // must be filled if state == WRITING
    map<int, bufferlist> returned_data;
    map<string, bufferlist> xattrs;
    ECUtil::HashInfoRef hinfo;
    ObjectContextRef obc;
    set<pg_shard_t> waiting_on_pushes;

    // valid in state READING
    pair<uint64_t, uint64_t> extent_requested;

    void dump(Formatter *f) const;

    RecoveryOp() : state(IDLE) {}
  };
  friend ostream &operator<<(ostream &lhs, const RecoveryOp &rhs);
  map<hobject_t, RecoveryOp> recovery_ops;

  void continue_recovery_op(
    RecoveryOp &op,
    RecoveryMessages *m);
  void dispatch_recovery_messages(RecoveryMessages &m, int priority);
  friend struct OnRecoveryReadComplete;
  void handle_recovery_read_complete(
    const hobject_t &hoid,
    boost::tuple<uint64_t, uint64_t, map<pg_shard_t, bufferlist> > &to_read,
    boost::optional<map<string, bufferlist> > attrs,
    RecoveryMessages *m);
  void handle_recovery_push(
    const PushOp &op,
    RecoveryMessages *m);
  void handle_recovery_push_reply(
    const PushReplyOp &op,
    pg_shard_t from,
    RecoveryMessages *m);
  void get_all_avail_shards(
    const hobject_t &hoid,
    set<int> &have,
    map<shard_id_t, pg_shard_t> &shards,
    bool for_recovery);

public:
  /**
   * Low level async read mechanism
   *
   * To avoid duplicating the logic for requesting and waiting for
   * multiple object shards, there is a common async read mechanism
   * taking a map of hobject_t->read_request_t which defines callbacks
   * taking read_result_ts as arguments.
   *
   * tid_to_read_map gives open read ops.  check_recovery_sources uses
   * shard_to_read_map and ReadOp::source_to_obj to restart reads
   * involving down osds.
   *
   * The user is responsible for specifying replicas on which to read
   * and for reassembling the buffer on the other side since client
   * reads require the original object buffer while recovery only needs
   * the missing pieces.
   *
   * Rather than handling reads on the primary directly, we simply send
   * ourselves a message.  This avoids a dedicated primary path for that
   * part.
   */
  struct read_result_t {
    int r;
    map<pg_shard_t, int> errors;
    boost::optional<map<string, bufferlist> > attrs;
    list<
      boost::tuple<
        uint64_t, uint64_t, map<pg_shard_t, bufferlist> > > returned;
    read_result_t() : r(0) {}
  };
  struct read_request_t {
    const list<boost::tuple<uint64_t, uint64_t, uint32_t> > to_read;
    const set<pg_shard_t> need;
    const bool want_attrs;
    GenContext<pair<RecoveryMessages *, read_result_t& > &> *cb;
    read_request_t(
      const list<boost::tuple<uint64_t, uint64_t, uint32_t> > &to_read,
      const set<pg_shard_t> &need,
      bool want_attrs,
      GenContext<pair<RecoveryMessages *, read_result_t& > &> *cb)
      : to_read(to_read), need(need), want_attrs(want_attrs),
        cb(cb) {}
  };
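  /*
   * Illustrative sketch only (hypothetical caller): a single-object,
   * single-extent request routed through start_read_op().  "shards" is
   * assumed to come from get_min_avail_to_read_shards(), and "cb" from a
   * GenContext subclass supplied by the caller.
   *
   *   map<hobject_t, read_request_t> for_read_op;
   *   for_read_op.insert(
   *     make_pair(
   *       hoid,
   *       read_request_t(
   *         {boost::make_tuple(0, 4096, 0)}, // off, len, op_flags
   *         shards,
   *         false,                           // want_attrs
   *         cb)));
   *   start_read_op(priority, for_read_op, op, false, false);
   */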
  friend ostream &operator<<(ostream &lhs, const read_request_t &rhs);

  struct ReadOp {
    int priority;
    ceph_tid_t tid;
    OpRequestRef op; // may be null if not on behalf of a client
    // True if redundant reads are issued, false otherwise;
    // this is useful to trade off some resources (redundant ops) for
    // low-latency reads, especially on a relatively idle cluster
    bool do_redundant_reads;
    // True if reading for recovery, which may read only a subset
    // of the available shards.
    bool for_recovery;

    ZTracer::Trace trace;

    map<hobject_t, read_request_t> to_read;
    map<hobject_t, read_result_t> complete;

    map<hobject_t, set<pg_shard_t>> obj_to_source;
    map<pg_shard_t, set<hobject_t> > source_to_obj;

    void dump(Formatter *f) const;

    set<pg_shard_t> in_progress;

    ReadOp(
      int priority,
      ceph_tid_t tid,
      bool do_redundant_reads,
      bool for_recovery,
      OpRequestRef op,
      map<hobject_t, read_request_t> &&_to_read)
      : priority(priority), tid(tid), op(op), do_redundant_reads(do_redundant_reads),
        for_recovery(for_recovery), to_read(std::move(_to_read)) {
      for (auto &&hpair: to_read) {
        auto &returned = complete[hpair.first].returned;
        for (auto &&extent: hpair.second.to_read) {
          returned.push_back(
            boost::make_tuple(
              extent.get<0>(),
              extent.get<1>(),
              map<pg_shard_t, bufferlist>()));
        }
      }
    }
    ReadOp() = delete;
    ReadOp(const ReadOp &) = default;
    ReadOp(ReadOp &&) = default;
  };
  friend struct FinishReadOp;
  void filter_read_op(
    const OSDMapRef& osdmap,
    ReadOp &op);
  void complete_read_op(ReadOp &rop, RecoveryMessages *m);
  friend ostream &operator<<(ostream &lhs, const ReadOp &rhs);
  map<ceph_tid_t, ReadOp> tid_to_read_map;
  map<pg_shard_t, set<ceph_tid_t> > shard_to_read_map;
  void start_read_op(
    int priority,
    map<hobject_t, read_request_t> &to_read,
    OpRequestRef op,
    bool do_redundant_reads, bool for_recovery);

  void do_read_op(ReadOp &rop);
  int send_all_remaining_reads(
    const hobject_t &hoid,
    ReadOp &rop);


  /**
   * Client writes
   *
   * ECTransaction is responsible for generating a transaction for
   * each shard to which we need to send the write.  As required
   * by the PGBackend interface, the ECBackend write mechanism
   * passes trim information with the write and last_complete back
   * with the reply.
   *
   * As with client reads, there is a possibility of out-of-order
   * completions.  Thus, callbacks and completion are called in order
   * on the writing list.
   */
  struct Op : boost::intrusive::list_base_hook<> {
    /// From submit_transaction caller, describes operation
    hobject_t hoid;
    object_stat_sum_t delta_stats;
    eversion_t version;
    eversion_t trim_to;
    boost::optional<pg_hit_set_history_t> updated_hit_set_history;
    vector<pg_log_entry_t> log_entries;
    ceph_tid_t tid;
    osd_reqid_t reqid;
    ZTracer::Trace trace;

    eversion_t roll_forward_to; /// Soon to be generated internally

    /// Ancillary also provided from submit_transaction caller
    map<hobject_t, ObjectContextRef> obc_map;

    /// see call_write_ordered
    std::list<std::function<void(void)> > on_write;

    /// Generated internally
    set<hobject_t> temp_added;
    set<hobject_t> temp_cleared;

    ECTransaction::WritePlan plan;
    bool requires_rmw() const { return !plan.to_read.empty(); }
    bool invalidates_cache() const { return plan.invalidates_cache; }

    // must be true if requires_rmw(), must be false if invalidates_cache()
    bool using_cache = false;

    /// In progress read state
    map<hobject_t,extent_set> pending_read; // subset already being read
    map<hobject_t,extent_set> remote_read;  // subset we must read
    map<hobject_t,extent_map> remote_read_result;
    bool read_in_progress() const {
      return !remote_read.empty() && remote_read_result.empty();
    }

    /// In progress write state
    set<pg_shard_t> pending_commit;
    set<pg_shard_t> pending_apply;
    bool write_in_progress() const {
      return !pending_commit.empty() || !pending_apply.empty();
    }

    /// optional, may be null, for tracking purposes
    OpRequestRef client_op;

    /// pin for cache
    ExtentCache::write_pin pin;

    /// Callbacks
    Context *on_local_applied_sync = nullptr;
    Context *on_all_applied = nullptr;
    Context *on_all_commit = nullptr;
    ~Op() {
      delete on_local_applied_sync;
      delete on_all_applied;
      delete on_all_commit;
    }
  };
  using op_list = boost::intrusive::list<Op>;
  friend ostream &operator<<(ostream &lhs, const Op &rhs);

  ExtentCache cache;
  map<ceph_tid_t, Op> tid_to_op_map; /// Owns Op structure

  /**
   * We model the possible rmw states as a set of waitlists.
   * All writes at this time complete in order, so a write blocked
   * at waiting_state blocks all writes behind it as well (same for
   * other states).
   *
   * Future work: We can break this up into a per-object pipeline
   * (almost).  First, provide an ordering token to submit_transaction
   * and require that all operations within a single transaction take
   * place on a subset of hobject_t space partitioned by that token
   * (the hashid seems about right to me -- this even works for temp
   * objects if you recall that a temp object created for object head foo
   * will only ever be referenced by other transactions on foo and isn't
   * reused).  Next, factor this part into a class and maintain one per
   * ordering token.  Next, fixup PrimaryLogPG's repop queue to be
   * partitioned by ordering token.  Finally, refactor the op pipeline
   * so that the log entries passed into submit_transaction aren't
   * versioned.  We can't assign versions to them until we actually
   * submit the operation.  That's probably going to be the hard part.
   */
  class pipeline_state_t {
    enum {
      CACHE_VALID = 0,
      CACHE_INVALID = 1
    } pipeline_state = CACHE_VALID;
  public:
    bool caching_enabled() const {
      return pipeline_state == CACHE_VALID;
    }
    bool cache_invalid() const {
      return !caching_enabled();
    }
    void invalidate() {
      pipeline_state = CACHE_INVALID;
    }
    void clear() {
      pipeline_state = CACHE_VALID;
    }
    friend ostream &operator<<(ostream &lhs, const pipeline_state_t &rhs);
  } pipeline_state;


  op_list waiting_state;  /// writes waiting on pipeline_state
  op_list waiting_reads;  /// writes waiting on partial stripe reads
  op_list waiting_commit; /// writes waiting on initial commit
  eversion_t completed_to;
  eversion_t committed_to;
  void start_rmw(Op *op, PGTransactionUPtr &&t);
  bool try_state_to_reads();
  bool try_reads_to_commit();
  bool try_finish_rmw();
  void check_ops();

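  /*
   * Pipeline sketch (informal): start_rmw() queues a new Op on
   * waiting_state; try_state_to_reads() moves it to waiting_reads once
   * any required stripe reads have been issued; try_reads_to_commit()
   * moves it to waiting_commit and sends the sub-writes; try_finish_rmw()
   * retires it once all shards have committed.  check_ops() (defined in
   * ECBackend.cc) can be thought of as looping over these steps, roughly:
   *
   *   while (try_state_to_reads() ||
   *          try_reads_to_commit() ||
   *          try_finish_rmw());
   */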
  ErasureCodeInterfaceRef ec_impl;


  /**
   * ECRecPred
   *
   * Determines whether _have is sufficient to recover an object
   */
  class ECRecPred : public IsPGRecoverablePredicate {
    set<int> want;
    ErasureCodeInterfaceRef ec_impl;
  public:
    explicit ECRecPred(ErasureCodeInterfaceRef ec_impl) : ec_impl(ec_impl) {
      for (unsigned i = 0; i < ec_impl->get_chunk_count(); ++i) {
        want.insert(i);
      }
    }
    bool operator()(const set<pg_shard_t> &_have) const override {
      set<int> have;
      for (set<pg_shard_t>::const_iterator i = _have.begin();
           i != _have.end();
           ++i) {
        have.insert(i->shard);
      }
      set<int> min;
      return ec_impl->minimum_to_decode(want, have, &min) == 0;
    }
  };
  IsPGRecoverablePredicate *get_is_recoverable_predicate() override {
    return new ECRecPred(ec_impl);
  }
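  /*
   * For illustration (hypothetical profile): with k=2 data chunks and m=1
   * coding chunk, any two distinct shards can decode the object, so a
   * _have set covering shards {0, 2} is recoverable while a single
   * surviving shard is not.
   */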

  /**
   * ECReadPred
   *
   * Determines whether _have is sufficient to read an object
   */
  class ECReadPred : public IsPGReadablePredicate {
    pg_shard_t whoami;
    ECRecPred rec_pred;
  public:
    ECReadPred(
      pg_shard_t whoami,
      ErasureCodeInterfaceRef ec_impl) : whoami(whoami), rec_pred(ec_impl) {}
    bool operator()(const set<pg_shard_t> &_have) const override {
      return _have.count(whoami) && rec_pred(_have);
    }
  };
  IsPGReadablePredicate *get_is_readable_predicate() override {
    return new ECReadPred(get_parent()->whoami_shard(), ec_impl);
  }
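  /*
   * For illustration: readability is stricter than recoverability -- the
   * local shard (whoami) must itself be present.  With the hypothetical
   * k=2/m=1 profile above, a _have set covering shards {0, 2} is
   * recoverable from anywhere but only readable on the OSDs holding
   * those two shards.
   */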

  const ECUtil::stripe_info_t sinfo;
  /// If modified, ensure that the ref is held until the update is applied
  SharedPtrRegistry<hobject_t, ECUtil::HashInfo> unstable_hashinfo_registry;
  ECUtil::HashInfoRef get_hash_info(const hobject_t &hoid, bool checks = true,
                                    const map<string,bufferptr> *attr = NULL);

public:
  ECBackend(
    PGBackend::Listener *pg,
    coll_t coll,
    ObjectStore::CollectionHandle &ch,
    ObjectStore *store,
    CephContext *cct,
    ErasureCodeInterfaceRef ec_impl,
    uint64_t stripe_width);

  /// Returns to_read replicas sufficient to reconstruct want
  int get_min_avail_to_read_shards(
    const hobject_t &hoid,   ///< [in] object
    const set<int> &want,    ///< [in] desired shards
    bool for_recovery,       ///< [in] true if we may use non-acting replicas
    bool do_redundant_reads, ///< [in] true if we want to issue redundant reads to reduce latency
    set<pg_shard_t> *to_read ///< [out] shards to read
    ); ///< @return error code, 0 on success
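  /*
   * For illustration (hypothetical situation): with want = {0, 1} from
   * get_want_to_read_shards() and the OSD holding shard 1 down, to_read
   * may substitute a parity shard (e.g. shard 2) so the erasure code can
   * still decode the requested range; with do_redundant_reads set, extra
   * shards are requested up front to avoid a second round trip.
   */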

  int get_remaining_shards(
    const hobject_t &hoid,
    const set<int> &avail,
    set<pg_shard_t> *to_read,
    bool for_recovery);

  int objects_get_attrs(
    const hobject_t &hoid,
    map<string, bufferlist> *out) override;

  void rollback_append(
    const hobject_t &hoid,
    uint64_t old_size,
    ObjectStore::Transaction *t) override;

  bool scrub_supported() override { return true; }
  bool auto_repair_supported() const override { return true; }

  void be_deep_scrub(
    const hobject_t &obj,
    uint32_t seed,
    ScrubMap::object &o,
    ThreadPool::TPHandle &handle) override;
  uint64_t be_get_ondisk_size(uint64_t logical_size) override {
    return sinfo.logical_to_next_chunk_offset(logical_size);
  }
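  /*
   * For illustration (hypothetical profile), assuming
   * logical_to_next_chunk_offset() rounds the logical size up to a full
   * stripe and maps it to a per-shard offset: with k=2 data chunks and a
   * stripe width of 8192 bytes (4096-byte chunks), a logical size of
   * 10000 rounds up to two stripes, so each shard stores 2 * 4096 = 8192
   * bytes on disk.
   */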
  void _failed_push(const hobject_t &hoid,
                    pair<RecoveryMessages *, ECBackend::read_result_t &> &in);
};
ostream &operator<<(ostream &lhs, const ECBackend::pipeline_state_t &rhs);

#endif