1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Ceph - scalable distributed file system
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
15 #include "common/perf_counters.h"
16 #include "common/dout.h"
17 #include "include/Context.h"
18 #include "msg/Messenger.h"
19 #include "osdc/Journaler.h"
20 #include "common/errno.h"
21 #include "include/ceph_assert.h"
22 #include "common/Finisher.h"
24 #define dout_subsys ceph_subsys_journaler
26 #define dout_prefix *_dout << objecter->messenger->get_myname() \
27 << ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
29 using std::chrono::seconds
;
// Completion context that invokes Journaler::_do_delayed_flush() when fired.
// NOTE(review): this extraction elides interior lines (closing braces, access
// specifiers) — confirm against the full source before editing.
32 class Journaler::C_DelayFlush
: public Context
{
35 explicit C_DelayFlush(Journaler
*j
) : journaler(j
) {}
36 void finish(int r
) override
{
37 journaler
->_do_delayed_flush();
// Switch the journaler into read-only mode (visible code only logs the
// transition; the actual flag assignment is elided from this extraction —
// TODO confirm against full source).
41 void Journaler::set_readonly()
45 ldout(cct
, 1) << "set_readonly" << dendl
;
// Switch the journaler into writeable mode (visible code only logs the
// transition; the flag assignment is elided from this extraction — TODO
// confirm against full source).
49 void Journaler::set_writeable()
53 ldout(cct
, 1) << "set_writeable" << dendl
;
// Initialize a brand-new, empty journal: set the stream format and point all
// write/read/trim position pointers at the end of the first layout period.
// Only valid on a writeable journaler (asserted below).
// NOTE(review): interior lines (lock, layout assignment) appear elided here.
57 void Journaler::create(file_layout_t
*l
, stream_format_t
const sf
)
61 ceph_assert(!readonly
);
65 journal_stream
.set_format(sf
);
// All positions start at one full period in, so object 0 is reserved for the
// header and the log proper begins at layout.get_period().
68 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
=
69 safe_pos
= read_pos
= requested_pos
= received_pos
=
70 expire_pos
= trimming_pos
= trimmed_pos
=
71 next_safe_pos
= layout
.get_period();
73 ldout(cct
, 1) << "created blank journal at inode 0x" << std::hex
<< ino
74 << std::dec
<< ", format=" << stream_format
<< dendl
;
// Public wrapper around _set_layout(); body elided in this extraction
// (presumably takes the lock then delegates — TODO confirm).
77 void Journaler::set_layout(file_layout_t
const *l
)
// Adopt a new file layout: warn if its pool differs from our pg_pool (the
// user can reset the pool id via cephfs-journal-tool), propagate it into the
// cached header copies, and recompute the prefetch window.
83 void Journaler::_set_layout(file_layout_t
const *l
)
87 if (layout
.pool_id
!= pg_pool
) {
88 // user can reset pool id through cephfs-journal-tool
89 lderr(cct
) << "may got older pool id from header layout" << dendl
;
92 last_written
.layout
= layout
;
93 last_committed
.layout
= layout
;
95 // prefetch intelligently.
96 // (watch out, this is big if you use big objects or weird striping)
97 uint64_t periods
= cct
->_conf
.get_val
<uint64_t>("journaler_prefetch_periods");
98 fetch_len
= layout
.get_period() * periods
;
102 /***************** HEADER *******************/
// Debug pretty-printer for the on-disk journal header: trim/expire/write
// positions plus the stream format.
104 ostream
& operator<<(ostream
&out
, const Journaler::Header
&h
)
106 return out
<< "loghead(trim " << h
.trimmed_pos
107 << ", expire " << h
.expire_pos
108 << ", write " << h
.write_pos
109 << ", stream_format " << (int)(h
.stream_format
)
// Completion for the initial header read: hands the result (and the filled
// bufferlist member, declared on an elided line) to _finish_read_head().
113 class Journaler::C_ReadHead
: public Context
{
117 explicit C_ReadHead(Journaler
*l
) : ls(l
) {}
118 void finish(int r
) override
{
119 ls
->_finish_read_head(r
, bl
);
// Completion for a header re-read: forwards result, buffer, and the caller's
// continuation to _finish_reread_head().
123 class Journaler::C_RereadHead
: public Context
{
128 C_RereadHead(Journaler
*l
, Context
*onfinish_
) : ls (l
),
129 onfinish(onfinish_
) {}
130 void finish(int r
) override
{
131 ls
->_finish_reread_head(r
, bl
, onfinish
);
// Completion for probing the end of the log; `end` (initialized to -1,
// meaning "no data found") is filled by the probe and passed on to
// _finish_probe_end().
135 class Journaler::C_ProbeEnd
: public Context
{
139 explicit C_ProbeEnd(Journaler
*l
) : ls(l
), end(-1) {}
140 void finish(int r
) override
{
141 ls
->_finish_probe_end(r
, end
);
// Completion for re-probing the log end while active; forwards the probed
// end position and the caller's finisher to _finish_reprobe().
145 class Journaler::C_ReProbe
: public Context
{
147 C_OnFinisher
*onfinish
;
150 C_ReProbe(Journaler
*l
, C_OnFinisher
*onfinish_
) :
151 ls(l
), onfinish(onfinish_
), end(0) {}
152 void finish(int r
) override
{
153 ls
->_finish_reprobe(r
, end
, onfinish
);
// Kick off journal recovery: read the header, then probe for the log end.
// Requires a read-only journaler not already ACTIVE. Completes `onread` with
// -EAGAIN if stopping (guard condition elided in this extraction — TODO
// confirm). If a recovery is already in flight, just queue the waiter.
157 void Journaler::recover(Context
*onread
)
161 onread
->complete(-EAGAIN
);
165 ldout(cct
, 1) << "recover start" << dendl
;
166 ceph_assert(state
!= STATE_ACTIVE
);
167 ceph_assert(readonly
);
170 waitfor_recover
.push_back(wrap_finisher(onread
));
172 if (state
!= STATE_UNDEF
) {
173 ldout(cct
, 1) << "recover - already recovering" << dendl
;
177 ldout(cct
, 1) << "read_head" << dendl
;
178 state
= STATE_READHEAD
;
179 C_ReadHead
*fin
= new C_ReadHead(this);
180 _read_head(fin
, &fin
->bl
);
// Issue an async full-object read of the header object (object 0 of the
// journal inode) into *bl; valid only while in READHEAD/REREADHEAD state.
183 void Journaler::_read_head(Context
*on_finish
, bufferlist
*bl
)
186 ceph_assert(state
== STATE_READHEAD
|| state
== STATE_REREADHEAD
);
188 object_t oid
= file_object_t(ino
, 0);
189 object_locator_t
oloc(pg_pool
);
190 objecter
->read_full(oid
, oloc
, CEPH_NOSNAP
, bl
, 0, wrap_finisher(on_finish
));
// Public entry point for re-reading the header; wraps the callback in the
// finisher and delegates to _reread_head() (locking elided in this
// extraction — TODO confirm).
193 void Journaler::reread_head(Context
*onfinish
)
196 _reread_head(wrap_finisher(onfinish
));
200 * Re-read the head from disk, and set the write_pos, expire_pos, trimmed_pos
201 * from the on-disk header. This switches the state to STATE_REREADHEAD for
202 * the duration, and you shouldn't start a re-read while other operations are
203 * in-flight, nor start other operations while a re-read is in progress.
204 * Also, don't call this until the Journaler has finished its recovery and has
// Re-read the header from disk while ACTIVE; transitions to REREADHEAD for
// the duration (see the usage caveats in the comment block above this
// function in the full source).
207 void Journaler::_reread_head(Context
*onfinish
)
209 ldout(cct
, 10) << "reread_head" << dendl
;
210 ceph_assert(state
== STATE_ACTIVE
);
212 state
= STATE_REREADHEAD
;
213 C_RereadHead
*fin
= new C_RereadHead(this, onfinish
);
214 _read_head(fin
, &fin
->bl
);
// Handle completion of a header re-read: decode the header out of `bl`,
// reset all write-side and trim pointers from it, return to ACTIVE, and run
// the caller's continuation. Completes with -EAGAIN when stopping and
// -EINVAL on a decode error (the decode itself and some control flow are
// elided in this extraction — TODO confirm).
217 void Journaler::_finish_reread_head(int r
, bufferlist
& bl
, Context
*finish
)
221 finish
->complete(-EAGAIN
);
225 //read on-disk header into
226 ceph_assert(bl
.length() || r
< 0 );
231 auto p
= bl
.cbegin();
234 } catch (const buffer::error
&e
) {
235 finish
->complete(-EINVAL
);
238 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
= safe_pos
= next_safe_pos
240 expire_pos
= h
.expire_pos
;
241 trimmed_pos
= trimming_pos
= h
.trimmed_pos
;
243 state
= STATE_ACTIVE
;
// Handle completion of the initial header read during recovery.
// - On read error: fail all recovery waiters with r.
// - On empty header object: treat as a blank journal and go ACTIVE.
// - Otherwise decode the header, validate magic and offset ordering,
//   adopt its layout/format/positions, and proceed to probe for the
//   true end of the log (STATE_PROBING).
// NOTE(review): several interior lines (decode call, corrupt-handling
// branches, returns) are elided in this extraction — confirm before editing.
249 void Journaler::_finish_read_head(int r
, bufferlist
& bl
)
255 ceph_assert(state
== STATE_READHEAD
);
258 ldout(cct
, 0) << "error getting journal off disk" << dendl
;
260 ls
.swap(waitfor_recover
);
261 finish_contexts(cct
, ls
, r
);
265 if (bl
.length() == 0) {
266 ldout(cct
, 1) << "_finish_read_head r=" << r
267 << " read 0 bytes, assuming empty log" << dendl
;
268 state
= STATE_ACTIVE
;
270 ls
.swap(waitfor_recover
);
271 finish_contexts(cct
, ls
, 0);
276 bool corrupt
= false;
278 auto p
= bl
.cbegin();
// Sanity-check the decoded header: magic must match, and the positions must
// be ordered trimmed <= expire <= write.
282 if (h
.magic
!= magic
) {
283 ldout(cct
, 0) << "on disk magic '" << h
.magic
<< "' != my magic '"
284 << magic
<< "'" << dendl
;
286 } else if (h
.write_pos
< h
.expire_pos
|| h
.expire_pos
< h
.trimmed_pos
) {
287 ldout(cct
, 0) << "Corrupt header (bad offsets): " << h
<< dendl
;
290 } catch (const buffer::error
&e
) {
296 ls
.swap(waitfor_recover
);
297 finish_contexts(cct
, ls
, -EINVAL
);
// Adopt the on-disk header: all write-side pointers start at the recorded
// write position (continuation lines elided), read-side at expire_pos.
301 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
= safe_pos
= next_safe_pos
303 read_pos
= requested_pos
= received_pos
= expire_pos
= h
.expire_pos
;
304 trimmed_pos
= trimming_pos
= h
.trimmed_pos
;
307 _set_layout(&h
.layout
);
308 stream_format
= h
.stream_format
;
309 journal_stream
.set_format(h
.stream_format
);
311 ldout(cct
, 1) << "_finish_read_head " << h
312 << ". probing for end of log (from " << write_pos
<< ")..."
314 C_ProbeEnd
*fin
= new C_ProbeEnd(this);
315 state
= STATE_PROBING
;
316 _probe(fin
, &fin
->end
);
// Probe forward from write_pos to find the actual end of the log on disk;
// result is written to *end. Only valid in PROBING/REPROBING states.
319 void Journaler::_probe(Context
*finish
, uint64_t *end
)
322 ldout(cct
, 1) << "probing for end of the log" << dendl
;
323 ceph_assert(state
== STATE_PROBING
|| state
== STATE_REPROBING
);
325 filer
.probe(ino
, &layout
, CEPH_NOSNAP
,
326 write_pos
, end
, true, 0, wrap_finisher(finish
));
// Re-probe the log end while ACTIVE (transitions to REPROBING for the
// duration); completion is routed through C_ReProbe.
329 void Journaler::_reprobe(C_OnFinisher
*finish
)
331 ldout(cct
, 10) << "reprobe" << dendl
;
332 ceph_assert(state
== STATE_ACTIVE
);
334 state
= STATE_REPROBING
;
335 C_ReProbe
*fin
= new C_ReProbe(this, finish
);
336 _probe(fin
, &fin
->end
);
// Handle completion of a re-probe: advance all write-side pointers to the
// newly discovered end (which must not be behind write_pos on success),
// return to ACTIVE, and complete the caller. -EAGAIN when stopping (guard
// elided in this extraction — TODO confirm).
340 void Journaler::_finish_reprobe(int r
, uint64_t new_end
,
341 C_OnFinisher
*onfinish
)
345 onfinish
->complete(-EAGAIN
);
349 ceph_assert(new_end
>= write_pos
|| r
< 0);
350 ldout(cct
, 1) << "_finish_reprobe new_end = " << new_end
351 << " (header had " << write_pos
<< ")."
353 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
= safe_pos
= next_safe_pos
= new_end
;
354 state
= STATE_ACTIVE
;
355 onfinish
->complete(r
);
// Handle completion of the recovery-time end-of-log probe: on success set
// all write-side pointers to the probed end, go ACTIVE, and complete the
// recovery waiters. end == -1 with an empty log aborts (should not happen
// once the header exists). Error handling lines are elided here.
358 void Journaler::_finish_probe_end(int r
, uint64_t end
)
364 ceph_assert(state
== STATE_PROBING
);
365 if (r
< 0) { // error in probing
368 if (((int64_t)end
) == -1) {
370 ldout(cct
, 1) << "_finish_probe_end write_pos = " << end
<< " (header had "
371 << write_pos
<< "). log was empty. recovered." << dendl
;
372 ceph_abort(); // hrm.
374 ceph_assert(end
>= write_pos
);
375 ldout(cct
, 1) << "_finish_probe_end write_pos = " << end
376 << " (header had " << write_pos
<< "). recovered."
380 state
= STATE_ACTIVE
;
382 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
= safe_pos
= next_safe_pos
= end
;
387 ls
.swap(waitfor_recover
);
388 finish_contexts(cct
, ls
, r
);
// Completion chaining a header re-read into a re-probe; forwards the result
// and the final caller continuation to _finish_reread_head_and_probe().
391 class Journaler::C_RereadHeadProbe
: public Context
394 C_OnFinisher
*final_finish
;
396 C_RereadHeadProbe(Journaler
*l
, C_OnFinisher
*finish
) :
397 ls(l
), final_finish(finish
) {}
398 void finish(int r
) override
{
399 ls
->_finish_reread_head_and_probe(r
, final_finish
);
// Re-read the header and then re-probe the log end, completing `onfinish`
// when both are done; only valid while ACTIVE.
403 void Journaler::reread_head_and_probe(Context
*onfinish
)
407 ceph_assert(state
== STATE_ACTIVE
);
408 _reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish
)));
// Intermediate step between the header re-read and the re-probe: propagate
// stopping (-EAGAIN) and blocklisting (-EBLOCKLISTED) to the caller; any
// other error is fatal (assert). The actual _reprobe() call is elided in
// this extraction — TODO confirm.
411 void Journaler::_finish_reread_head_and_probe(int r
, C_OnFinisher
*onfinish
)
413 // Expect to be called back from finish_reread_head, which already takes lock
416 onfinish
->complete(-EAGAIN
);
420 // Let the caller know that the operation has failed or was intentionally
421 // failed since the caller has been blocklisted.
422 if (r
== -EBLOCKLISTED
) {
423 onfinish
->complete(r
);
427 ceph_assert(!r
); //if we get an error, we're boned
// Completion for a header write: forwards result, the header that was
// written, and the commit callback to _finish_write_head().
434 class Journaler::C_WriteHead
: public Context
{
438 C_OnFinisher
*oncommit
;
439 C_WriteHead(Journaler
*l
, Header
& h_
, C_OnFinisher
*c
) : ls(l
), h(h_
),
441 void finish(int r
) override
{
442 ls
->_finish_write_head(r
, h
, oncommit
);
// Public entry point for persisting the header; delegates to _write_head()
// (locking elided in this extraction — TODO confirm).
446 void Journaler::write_head(Context
*oncommit
)
449 _write_head(oncommit
);
// Persist the current positions into the header object: snapshot
// trimmed/expire/safe positions into last_written (write_pos is recorded as
// safe_pos — only safely-flushed data counts), sanity-check the ordering,
// record the write time, encode, and issue an async full-object write.
453 void Journaler::_write_head(Context
*oncommit
)
455 ceph_assert(!readonly
);
456 ceph_assert(state
== STATE_ACTIVE
);
457 last_written
.trimmed_pos
= trimmed_pos
;
458 last_written
.expire_pos
= expire_pos
;
459 last_written
.unused_field
= expire_pos
;
460 last_written
.write_pos
= safe_pos
;
461 last_written
.stream_format
= stream_format
;
462 ldout(cct
, 10) << "write_head " << last_written
<< dendl
;
464 // Avoid persisting bad pointers in case of bugs
465 ceph_assert(last_written
.write_pos
>= last_written
.expire_pos
);
466 ceph_assert(last_written
.expire_pos
>= last_written
.trimmed_pos
);
468 last_wrote_head
= ceph::real_clock::now();
471 encode(last_written
, bl
);
474 object_t oid
= file_object_t(ino
, 0);
475 object_locator_t
oloc(pg_pool
);
476 objecter
->write_full(oid
, oloc
, snapc
, bl
, ceph::real_clock::now(), 0,
477 wrap_finisher(new C_WriteHead(
479 wrap_finisher(oncommit
))),
// Handle completion of a header write: on error route through
// handle_write_error(); on success record the header as committed and
// complete the caller's commit callback.
483 void Journaler::_finish_write_head(int r
, Header
&wrote
,
484 C_OnFinisher
*oncommit
)
489 lderr(cct
) << "_finish_write_head got " << cpp_strerror(r
) << dendl
;
490 handle_write_error(r
);
493 ceph_assert(!readonly
);
494 ldout(cct
, 10) << "_finish_write_head " << wrote
<< dendl
;
495 last_committed
= wrote
;
497 oncommit
->complete(r
);
504 /***************** WRITING *******************/
// Completion for a data flush write: remembers the flush start offset and
// the submission timestamp (for latency accounting) and forwards them to
// _finish_flush().
506 class Journaler::C_Flush
: public Context
{
509 ceph::real_time stamp
;
511 C_Flush(Journaler
*l
, int64_t s
, ceph::real_time st
)
512 : ls(l
), start(s
), stamp(st
) {}
513 void finish(int r
) override
{
514 ls
->_finish_flush(r
, start
, stamp
);
// Handle completion of one flush write starting at `start`:
// - on error, route through handle_write_error();
// - record write latency into the perf counter;
// - remove this flush from pending_safe and advance safe_pos (to
//   next_safe_pos when nothing remains pending, otherwise to the oldest
//   still-pending flush's recorded safe position);
// - complete any waitfor_safe waiters whose position is now <= safe_pos.
518 void Journaler::_finish_flush(int r
, uint64_t start
, ceph::real_time stamp
)
521 ceph_assert(!readonly
);
524 lderr(cct
) << "_finish_flush got " << cpp_strerror(r
) << dendl
;
525 handle_write_error(r
);
529 ceph_assert(start
< flush_pos
);
533 ceph::timespan lat
= ceph::real_clock::now() - stamp
;
534 logger
->tinc(logger_key_lat
, lat
);
// Completions can arrive out of order; safe_pos may only advance to the
// minimum next-safe position among flushes still in flight.
538 auto it
= pending_safe
.find(start
);
539 ceph_assert(it
!= pending_safe
.end());
540 uint64_t min_next_safe_pos
= pending_safe
.begin()->second
;
541 pending_safe
.erase(it
);
542 if (pending_safe
.empty())
543 safe_pos
= next_safe_pos
;
545 safe_pos
= min_next_safe_pos
;
547 ldout(cct
, 10) << "_finish_flush safe from " << start
548 << ", pending_safe " << pending_safe
549 << ", (prezeroing/prezero)/write/flush/safe positions now "
550 << "(" << prezeroing_pos
<< "/" << prezero_pos
<< ")/"
551 << write_pos
<< "/" << flush_pos
<< "/" << safe_pos
554 // kick waiters <= safe_pos
555 if (!waitfor_safe
.empty()) {
557 while (!waitfor_safe
.empty()) {
558 auto it
= waitfor_safe
.begin();
559 if (it
->first
> safe_pos
)
561 ls
.splice(ls
.end(), it
->second
);
562 waitfor_safe
.erase(it
);
564 finish_contexts(cct
, ls
);
// Append one journal entry to the in-memory write buffer, throttled by
// write_buf_throttle (blocks when the buffer is nearly full). When the
// append crosses an object boundary, flush the completed object(s)
// immediately; _do_flush() may leave the tail of this entry unflushed, in
// which case next_safe_pos is pulled back to the end of the previous entry.
// Return value (elided in this extraction) is presumably the new write_pos —
// TODO confirm against full source.
570 uint64_t Journaler::append_entry(bufferlist
& bl
)
574 ceph_assert(!readonly
);
575 uint32_t s
= bl
.length();
578 size_t delta
= bl
.length() + journal_stream
.get_envelope_size();
579 // write_buf space is nearly full
580 if (!write_buf_throttle
.get_or_fail(delta
)) {
582 ldout(cct
, 10) << "write_buf_throttle wait, delta " << delta
<< dendl
;
583 write_buf_throttle
.get(delta
);
586 ldout(cct
, 20) << "write_buf_throttle get, delta " << delta
<< dendl
;
587 size_t wrote
= journal_stream
.write(bl
, &write_buf
, write_pos
);
588 ldout(cct
, 10) << "append_entry len " << s
<< " to " << write_pos
<< "~"
592 // flush previous object?
593 uint64_t su
= get_layout_period();
595 uint64_t write_off
= write_pos
% su
;
596 uint64_t write_obj
= write_pos
/ su
;
597 uint64_t flush_obj
= flush_pos
/ su
;
598 if (write_obj
!= flush_obj
) {
599 ldout(cct
, 10) << " flushing completed object(s) (su " << su
<< " wro "
600 << write_obj
<< " flo " << flush_obj
<< ")" << dendl
;
601 _do_flush(write_buf
.length() - write_off
);
603 // if _do_flush() skips flushing some data, it does do a best effort to
604 // update next_safe_pos.
605 if (write_buf
.length() > 0 &&
606 write_buf
.length() <= wrote
) { // the unflushed data are within this entry
607 // set next_safe_pos to end of previous entry
608 next_safe_pos
= write_pos
- wrote
;
// Flush buffered journal data to RADOS.
// - No-op when nothing is buffered; `amount` (if non-zero and smaller than
//   the buffered length) caps how much is flushed.
// - Never flushes into un-prezeroed space: if the write would come within
//   two periods of prezero_pos, defer (record waiting_for_zero_pos) or clamp
//   the length to what has been prezeroed.
// - Registers the flush in pending_safe keyed by its start offset, tracking
//   next_safe_pos (best-effort aligned to a journal-entry boundary when only
//   part of the buffer is flushed), then submits via filer.write() with
//   C_Flush as the commit callback.
// NOTE(review): several interior lines (early returns, _issue_prezero call)
// are elided in this extraction — confirm before editing.
616 void Journaler::_do_flush(unsigned amount
)
620 if (write_pos
== flush_pos
)
622 ceph_assert(write_pos
> flush_pos
);
623 ceph_assert(!readonly
);
626 uint64_t len
= write_pos
- flush_pos
;
627 ceph_assert(len
== write_buf
.length());
628 if (amount
&& amount
< len
)
631 // zero at least two full periods ahead. this ensures
632 // that the next object will not exist.
633 uint64_t period
= get_layout_period();
634 if (flush_pos
+ len
+ 2*period
> prezero_pos
) {
637 int64_t newlen
= prezero_pos
- flush_pos
- period
;
639 ldout(cct
, 10) << "_do_flush wanted to do " << flush_pos
<< "~" << len
640 << " already too close to prezero_pos " << prezero_pos
641 << ", zeroing first" << dendl
;
642 waiting_for_zero_pos
= flush_pos
+ len
;
645 if (static_cast<uint64_t>(newlen
) < len
) {
646 ldout(cct
, 10) << "_do_flush wanted to do " << flush_pos
<< "~" << len
647 << " but hit prezero_pos " << prezero_pos
648 << ", will do " << flush_pos
<< "~" << newlen
<< dendl
;
649 waiting_for_zero_pos
= flush_pos
+ len
;
653 ldout(cct
, 10) << "_do_flush flushing " << flush_pos
<< "~" << len
<< dendl
;
655 // submit write for anything pending
656 // flush _start_ pos to _finish_flush
657 ceph::real_time now
= ceph::real_clock::now();
660 Context
*onsafe
= new C_Flush(this, flush_pos
, now
); // on COMMIT
661 pending_safe
[flush_pos
] = next_safe_pos
;
666 if (len
== write_buf
.length()) {
667 write_bl
.swap(write_buf
);
668 next_safe_pos
= write_pos
;
670 write_buf
.splice(0, len
, &write_bl
);
671 // Keys of waitfor_safe map are journal entry boundaries.
672 // Try finding a journal entry that we are actually flushing
673 // and set next_safe_pos to end of it. This is best effort.
674 // The one we found may not be the lastest flushing entry.
675 auto p
= waitfor_safe
.lower_bound(flush_pos
+ len
);
676 if (p
!= waitfor_safe
.end()) {
677 if (p
->first
> flush_pos
+ len
&& p
!= waitfor_safe
.begin())
679 if (p
->first
<= flush_pos
+ len
&& p
->first
> next_safe_pos
)
680 next_safe_pos
= p
->first
;
684 filer
.write(ino
, &layout
, snapc
,
685 flush_pos
, len
, write_bl
, ceph::real_clock::now(),
687 wrap_finisher(onsafe
), write_iohint
);
690 ceph_assert(write_buf
.length() == write_pos
- flush_pos
);
691 write_buf_throttle
.put(len
);
692 ldout(cct
, 20) << "write_buf_throttle put, len " << len
<< dendl
;
695 << "_do_flush (prezeroing/prezero)/write/flush/safe pointers now at "
696 << "(" << prezeroing_pos
<< "/" << prezero_pos
<< ")/" << write_pos
697 << "/" << flush_pos
<< "/" << safe_pos
<< dendl
;
// Public entry point: register `onsafe` to fire once everything written so
// far is safe on disk; completes with -EAGAIN when stopping (guard condition
// elided in this extraction — TODO confirm).
703 void Journaler::wait_for_flush(Context
*onsafe
)
708 onsafe
->complete(-EAGAIN
);
711 _wait_for_flush(onsafe
);
// If everything up to write_pos is already safe, complete `onsafe`
// immediately via the finisher; otherwise queue it on waitfor_safe keyed by
// the current write_pos (a journal-entry boundary).
714 void Journaler::_wait_for_flush(Context
*onsafe
)
716 ceph_assert(!readonly
);
718 // all flushed and safe?
719 if (write_pos
== safe_pos
) {
720 ceph_assert(write_buf
.length() == 0);
722 << "flush nothing to flush, (prezeroing/prezero)/write/flush/safe "
723 "pointers at " << "(" << prezeroing_pos
<< "/" << prezero_pos
<< ")/"
724 << write_pos
<< "/" << flush_pos
<< "/" << safe_pos
<< dendl
;
726 finisher
->queue(onsafe
, 0);
733 waitfor_safe
[write_pos
].push_back(wrap_finisher(onsafe
));
// Public entry point: flush buffered data and call `onsafe` when it is safe;
// completes with -EAGAIN when stopping (guard condition elided in this
// extraction — TODO confirm).
737 void Journaler::flush(Context
*onsafe
)
742 onsafe
->complete(-EAGAIN
);
745 _flush(wrap_finisher(onsafe
));
// Flush all buffered data: if nothing is buffered just log; otherwise
// (flush submission elided in this extraction) register the waiter via
// _wait_for_flush() and opportunistically rewrite the header when it is due.
748 void Journaler::_flush(C_OnFinisher
*onsafe
)
750 ceph_assert(!readonly
);
752 if (write_pos
== flush_pos
) {
753 ceph_assert(write_buf
.length() == 0);
754 ldout(cct
, 10) << "flush nothing to flush, (prezeroing/prezero)/write/"
755 "flush/safe pointers at " << "(" << prezeroing_pos
<< "/" << prezero_pos
756 << ")/" << write_pos
<< "/" << flush_pos
<< "/" << safe_pos
763 _wait_for_flush(onsafe
);
767 if (_write_head_needed()) {
// True when the configured journaler_write_head_interval has elapsed since
// the header was last written.
772 bool Journaler::_write_head_needed()
774 return last_wrote_head
+ seconds(cct
->_conf
.get_val
<int64_t>("journaler_write_head_interval"))
775 < ceph::real_clock::now();
779 /*************** prezeroing ******************/
// Completion for a prezero (zeroing-ahead) operation: remembers the zeroed
// range and forwards it to Journaler::_finish_prezero().
781 struct C_Journaler_Prezero
: public Context
{
782 Journaler
*journaler
;
784 C_Journaler_Prezero(Journaler
*j
, uint64_t f
, uint64_t l
)
785 : journaler(j
), from(f
), len(l
) {}
786 void finish(int r
) override
{
787 journaler
->_finish_prezero(r
, from
, len
);
// Issue zeroing ahead of the write position so that flushed writes always
// land in pre-zeroed space: target is journaler_prezero_periods periods past
// write_pos, rounded up to a period boundary. Zeroes a full period at a time
// when aligned (logged as "removing"), otherwise the partial remainder of
// the current period.
791 void Journaler::_issue_prezero()
793 ceph_assert(prezeroing_pos
>= flush_pos
);
795 uint64_t num_periods
= cct
->_conf
.get_val
<uint64_t>("journaler_prezero_periods");
797 * issue zero requests based on write_pos, even though the invariant
798 * is that we zero ahead of flush_pos.
800 uint64_t period
= get_layout_period();
801 uint64_t to
= write_pos
+ period
* num_periods
+ period
- 1;
804 if (prezeroing_pos
>= to
) {
805 ldout(cct
, 20) << "_issue_prezero target " << to
<< " <= prezeroing_pos "
806 << prezeroing_pos
<< dendl
;
810 while (prezeroing_pos
< to
) {
812 if (prezeroing_pos
% period
== 0) {
814 ldout(cct
, 10) << "_issue_prezero removing " << prezeroing_pos
<< "~"
815 << period
<< " (full period)" << dendl
;
817 len
= period
- (prezeroing_pos
% period
);
818 ldout(cct
, 10) << "_issue_prezero zeroing " << prezeroing_pos
<< "~"
819 << len
<< " (partial period)" << dendl
;
822 Context
*c
= wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos
,
824 filer
.zero(ino
, &layout
, snapc
, prezeroing_pos
, len
,
825 ceph::real_clock::now(), 0, c
);
826 prezeroing_pos
+= len
;
830 // Lock cycle because we get called out of objecter callback (holding
831 // objecter read lock), but there are also cases where we take the journaler
832 // lock before calling into objecter to do I/O.
// Handle completion of one prezero range [start, start+len):
// - zero completions may arrive out of order, so contiguous advancement of
//   prezero_pos is tracked via the pending_zero interval set;
// - once prezero_pos catches up, retry any flush that was deferred waiting
//   for zeroing (waiting_for_zero_pos) and complete waitfor_prezero waiters;
// - -ENOENT (object already absent) counts as success.
833 void Journaler::_finish_prezero(int r
, uint64_t start
, uint64_t len
)
837 ldout(cct
, 10) << "_prezeroed to " << start
<< "~" << len
838 << ", prezeroing/prezero was " << prezeroing_pos
<< "/"
839 << prezero_pos
<< ", pending " << pending_zero
841 if (r
< 0 && r
!= -ENOENT
) {
842 lderr(cct
) << "_prezeroed got " << cpp_strerror(r
) << dendl
;
843 handle_write_error(r
);
847 ceph_assert(r
== 0 || r
== -ENOENT
);
849 if (start
== prezero_pos
) {
851 while (!pending_zero
.empty() &&
852 pending_zero
.begin().get_start() == prezero_pos
) {
853 interval_set
<uint64_t>::iterator
b(pending_zero
.begin());
854 prezero_pos
+= b
.get_len();
855 pending_zero
.erase(b
);
858 if (waiting_for_zero_pos
> flush_pos
) {
859 _do_flush(waiting_for_zero_pos
- flush_pos
);
862 if (prezero_pos
== prezeroing_pos
&&
863 !waitfor_prezero
.empty()) {
865 ls
.swap(waitfor_prezero
);
866 finish_contexts(cct
, ls
, 0);
869 pending_zero
.insert(start
, len
);
871 ldout(cct
, 10) << "_prezeroed prezeroing/prezero now " << prezeroing_pos
872 << "/" << prezero_pos
873 << ", pending " << pending_zero
// Register `onfinish` to fire once all outstanding prezero operations have
// completed; fires immediately (via the finisher) if none are in flight.
877 void Journaler::wait_for_prezero(Context
*onfinish
)
879 ceph_assert(onfinish
);
882 if (prezero_pos
== prezeroing_pos
) {
883 finisher
->queue(onfinish
, 0);
886 waitfor_prezero
.push_back(wrap_finisher(onfinish
));
890 /***************** READING *******************/
// Completion for one prefetch read: remembers the requested offset/length
// and forwards them with the filled buffer to _finish_read().
893 class Journaler::C_Read
: public Context
{
899 C_Read(Journaler
*j
, uint64_t o
, uint64_t l
) : ls(j
), offset(o
), length(l
) {}
900 void finish(int r
) override
{
901 ls
->_finish_read(r
, offset
, length
, bl
);
// Completion queued on waitfor_safe to retry a stalled read once more data
// becomes safe; runs with the journaler lock already held (see comment).
905 class Journaler::C_RetryRead
: public Context
{
908 explicit C_RetryRead(Journaler
*l
) : ls(l
) {}
910 void finish(int r
) override
{
911 // Should only be called from waitfor_safe i.e. already inside lock
912 // (ls->lock is locked
// Handle completion of one prefetch read: on error or short read, notify the
// on_readable waiter (error paths partly elided in this extraction);
// otherwise stash the buffer in prefetch_buf keyed by offset and try to
// assimilate contiguous data, reporting decode errors to the waiter.
917 void Journaler::_finish_read(int r
, uint64_t offset
, uint64_t length
,
923 ldout(cct
, 0) << "_finish_read got error " << r
<< dendl
;
926 ldout(cct
, 10) << "_finish_read got " << offset
<< "~" << bl
.length()
928 if (bl
.length() < length
) {
929 ldout(cct
, 0) << "_finish_read got less than expected (" << length
<< ")"
937 C_OnFinisher
*f
= on_readable
;
944 prefetch_buf
[offset
].swap(bl
);
947 _assimilate_prefetch();
948 } catch (const buffer::error
&err
) {
949 lderr(cct
) << "_decode error from assimilate_prefetch" << dendl
;
952 C_OnFinisher
*f
= on_readable
;
// Move contiguous prefetched buffers (those starting exactly at
// received_pos) from prefetch_buf into read_buf, advancing received_pos;
// stop at the first gap. Then re-evaluate readability and, if the journal
// just became readable (or we are at the journal end), complete the
// on_readable waiter.
961 void Journaler::_assimilate_prefetch()
963 bool was_readable
= readable
;
965 bool got_any
= false;
966 while (!prefetch_buf
.empty()) {
967 map
<uint64_t,bufferlist
>::iterator p
= prefetch_buf
.begin();
968 if (p
->first
!= received_pos
) {
969 uint64_t gap
= p
->first
- received_pos
;
970 ldout(cct
, 10) << "_assimilate_prefetch gap of " << gap
971 << " from received_pos " << received_pos
972 << " to first prefetched buffer " << p
->first
<< dendl
;
976 ldout(cct
, 10) << "_assimilate_prefetch " << p
->first
<< "~"
977 << p
->second
.length() << dendl
;
978 received_pos
+= p
->second
.length();
979 read_buf
.claim_append(p
->second
);
980 ceph_assert(received_pos
<= requested_pos
);
981 prefetch_buf
.erase(p
);
986 ldout(cct
, 10) << "_assimilate_prefetch read_buf now " << read_pos
<< "~"
987 << read_buf
.length() << ", read pointers read_pos=" << read_pos
988 << " received_pos=" << received_pos
<< " requested_pos=" << requested_pos
991 // Update readability (this will also hit any decode errors resulting
993 readable
= _is_readable();
996 if ((got_any
&& !was_readable
&& readable
) || read_pos
== write_pos
) {
998 ldout(cct
, 10) << "_finish_read now readable (or at journal end) readable="
999 << readable
<< " read_pos=" << read_pos
<< " write_pos="
1000 << write_pos
<< dendl
;
1002 C_OnFinisher
*f
= on_readable
;
// Issue prefetch reads for up to `len` bytes starting at requested_pos.
// Never reads past safe_pos (data not yet safe may still change); when stuck
// at safe_pos, queue a C_RetryRead on waitfor_safe (keyed to a journal-entry
// boundary) so the read resumes after the next flush completes. Reads are
// issued one layout period at a time so contiguous data can be processed as
// it arrives rather than waiting for one big read.
1009 void Journaler::_issue_read(uint64_t len
)
1011 // stuck at safe_pos? (this is needed if we are reading the tail of
1012 // a journal we are also writing to)
1013 ceph_assert(requested_pos
<= safe_pos
);
1014 if (requested_pos
== safe_pos
) {
1015 ldout(cct
, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos
1016 << ", waiting" << dendl
;
1017 ceph_assert(write_pos
> requested_pos
);
1018 if (pending_safe
.empty()) {
1022 // Make sure keys of waitfor_safe map are journal entry boundaries.
1023 // The key we used here is either next_safe_pos or old value of
1024 // next_safe_pos. next_safe_pos is always set to journal entry
1026 auto p
= pending_safe
.rbegin();
1027 if (p
!= pending_safe
.rend())
1028 waitfor_safe
[p
->second
].push_back(new C_RetryRead(this));
1030 waitfor_safe
[next_safe_pos
].push_back(new C_RetryRead(this));
1034 // don't read too much
1035 if (requested_pos
+ len
> safe_pos
) {
1036 len
= safe_pos
- requested_pos
;
1037 ldout(cct
, 10) << "_issue_read reading only up to safe_pos " << safe_pos
1042 ldout(cct
, 10) << "_issue_read reading " << requested_pos
<< "~" << len
1043 << ", read pointers read_pos=" << read_pos
<< " received_pos=" << received_pos
1044 << " requested_pos+len=" << (requested_pos
+len
) << dendl
;
1046 // step by period (object). _don't_ do a single big filer.read()
1047 // here because it will wait for all object reads to complete before
1048 // giving us back any data. this way we can process whatever bits
1049 // come in that are contiguous.
1050 uint64_t period
= get_layout_period();
1052 uint64_t e
= requested_pos
+ period
;
1054 uint64_t l
= e
- requested_pos
;
1057 C_Read
*c
= new C_Read(this, requested_pos
, l
);
1058 filer
.read(ino
, &layout
, CEPH_NOSNAP
, requested_pos
, l
, &c
->bl
, 0,
1059 wrap_finisher(c
), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
);
// Decide how far ahead to prefetch: use temp_fetch_len (a one-off larger
// requirement noted by _is_readable) when set, otherwise the configured
// fetch_len; round the target up to a full period but never past write_pos.
// If reads are blocked because written data is not yet safe and no flush is
// in flight, (flush issuance elided in this extraction) log the stall state.
1065 void Journaler::_prefetch()
1070 ldout(cct
, 10) << "_prefetch" << dendl
;
1073 if (temp_fetch_len
) {
1074 ldout(cct
, 10) << "_prefetch temp_fetch_len " << temp_fetch_len
<< dendl
;
1075 pf
= temp_fetch_len
;
1081 uint64_t raw_target
= read_pos
+ pf
;
1083 // read full log segments, so increase if necessary
1084 uint64_t period
= get_layout_period();
1085 uint64_t remainder
= raw_target
% period
;
1086 uint64_t adjustment
= remainder
? period
- remainder
: 0;
1087 uint64_t target
= raw_target
+ adjustment
;
1089 // don't read past the log tail
1090 if (target
> write_pos
)
1093 if (requested_pos
< target
) {
1094 uint64_t len
= target
- requested_pos
;
1095 ldout(cct
, 10) << "_prefetch " << pf
<< " requested_pos " << requested_pos
1096 << " < target " << target
<< " (" << raw_target
1097 << "), prefetching " << len
<< dendl
;
1099 if (pending_safe
.empty() && write_pos
> safe_pos
) {
1100 // If we are reading and writing the journal, then we may need
1101 // to issue a flush if one isn't already in progress.
1102 // Avoid doing a flush every time so that if we do write/read/write/read
1103 // we don't end up flushing after every write.
1104 ldout(cct
, 10) << "_prefetch: requested_pos=" << requested_pos
1105 << ", read_pos=" << read_pos
1106 << ", write_pos=" << write_pos
1107 << ", safe_pos=" << safe_pos
<< dendl
;
1117 * _is_readable() - return true if next entry is ready.
// Return whether read_buf holds a complete next entry. When it does not:
// - a partial entry at the exact tail of the journal (received_pos ==
//   write_pos) means a torn final write; the write-side pointers are pulled
//   back to read_pos to discard it (on-disk truncation is a noted FIXME);
// - if the entry needs more bytes than the current fetch window, record the
//   requirement in temp_fetch_len so the next prefetch is big enough.
1119 bool Journaler::_is_readable()
1121 // anything to read?
1122 if (read_pos
== write_pos
)
1125 // Check if the retrieve bytestream has enough for an entry
1127 if (journal_stream
.readable(read_buf
, &need
)) {
1131 ldout (cct
, 10) << "_is_readable read_buf.length() == " << read_buf
.length()
1132 << ", but need " << need
<< " for next entry; fetch_len is "
1133 << fetch_len
<< dendl
;
1135 // partial fragment at the end?
1136 if (received_pos
== write_pos
) {
1137 ldout(cct
, 10) << "is_readable() detected partial entry at tail, "
1138 "adjusting write_pos to " << read_pos
<< dendl
;
1141 prezeroing_pos
= prezero_pos
= write_pos
= flush_pos
= safe_pos
= next_safe_pos
= read_pos
;
1142 ceph_assert(write_buf
.length() == 0);
1143 ceph_assert(waitfor_safe
.empty());
1146 requested_pos
= received_pos
= read_pos
;
1149 // FIXME: truncate on disk?
1154 if (need
> fetch_len
) {
1155 temp_fetch_len
= need
;
1156 ldout(cct
, 10) << "_is_readable noting temp_fetch_len " << temp_fetch_len
1160 ldout(cct
, 10) << "_is_readable: not readable, returning false" << dendl
;
1165 * is_readable() - kickstart prefetch, too
// Public readability check; body entirely elided in this extraction —
// presumably locks, checks `readable`, and kicks _prefetch(). TODO confirm.
1167 bool Journaler::is_readable()
// Completion for the journal-data purge issued by erase(): forwards the
// result and the user's completion to _finish_erase().
1180 class Journaler::C_EraseFinish
: public Context
{
1181 Journaler
*journaler
;
1182 C_OnFinisher
*completion
;
1184 C_EraseFinish(Journaler
*j
, C_OnFinisher
*c
) : journaler(j
), completion(c
) {}
1185 void finish(int r
) override
{
1186 journaler
->_finish_erase(r
, completion
);
1191 * Entirely erase the journal, including header. For use when you
1192 * have already made a copy of the journal somewhere else.
1194 void Journaler::erase(Context
*completion
)
1198 // Async delete the journal data
// Purge every data object from the first trimmed object through two objects
// past write_pos (the +2 covers partial objects at the tail / prezeroing).
1199 uint64_t first
= trimmed_pos
/ get_layout_period();
1200 uint64_t num
= (write_pos
- trimmed_pos
) / get_layout_period() + 2;
1201 filer
.purge_range(ino
, &layout
, SnapContext(), first
, num
,
1202 ceph::real_clock::now(), 0,
1203 wrap_finisher(new C_EraseFinish(
1204 this, wrap_finisher(completion
))));
1206 // We will not start the operation to delete the header until
1207 // _finish_erase has seen the data deletion succeed: otherwise if
1208 // there was an error deleting data we might prematurely delete the
1209 // header thereby lose our reference to the data.
// After the journal data purge: on success, purge the header object (object
// 0) and route the user's completion through that purge; on failure, report
// the error to the completion without touching the header. -EAGAIN when
// stopping.
1212 void Journaler::_finish_erase(int data_result
, C_OnFinisher
*completion
)
1215 if (is_stopping()) {
1216 completion
->complete(-EAGAIN
);
1220 if (data_result
== 0) {
1221 // Async delete the journal header
1222 filer
.purge_range(ino
, &layout
, SnapContext(), 0, 1,
1223 ceph::real_clock::now(),
1224 0, wrap_finisher(completion
));
1226 lderr(cct
) << "Failed to delete journal " << ino
<< " data: "
1227 << cpp_strerror(data_result
) << dendl
;
1228 completion
->complete(data_result
);
1232 /* try_read_entry(bl)
1233 * read entry into bl if it's ready.
1234 * otherwise, do nothing.
// Decode the next entry out of read_buf into `bl`, advance read_pos by the
// consumed bytes, and refresh `readable`. In the RESILIENT stream format the
// entry's embedded start pointer must match read_pos. Decode errors from the
// stream or from the subsequent _is_readable() are logged (error returns
// elided in this extraction — TODO confirm). Multi-buffer results are noted
// below as a decode-performance concern.
1236 bool Journaler::try_read_entry(bufferlist
& bl
)
1241 ldout(cct
, 10) << "try_read_entry at " << read_pos
<< " not readable"
1249 consumed
= journal_stream
.read(read_buf
, &bl
, &start_ptr
);
1250 if (stream_format
>= JOURNAL_FORMAT_RESILIENT
) {
1251 ceph_assert(start_ptr
== read_pos
);
1253 } catch (const buffer::error
&e
) {
1254 lderr(cct
) << __func__
<< ": decode error from journal_stream" << dendl
;
1259 ldout(cct
, 10) << "try_read_entry at " << read_pos
<< " read "
1260 << read_pos
<< "~" << consumed
<< " (have "
1261 << read_buf
.length() << ")" << dendl
;
1263 read_pos
+= consumed
;
1265 // We were readable, we might not be any more
1266 readable
= _is_readable();
1267 } catch (const buffer::error
&e
) {
1268 lderr(cct
) << __func__
<< ": decode error from _is_readable" << dendl
;
1276 // If bufferlist consists of discontiguous memory, decoding types whose
1277 // denc_traits needs contiguous memory is inefficient. The bufferlist may
1278 // get copied to temporary memory multiple times (copy_shallow() in
1279 // src/include/denc.h actually does deep copy)
1280 if (bl
.get_num_buffers() > 1)
// Register a single waiter to fire when the next entry becomes readable;
// only one waiter may be outstanding (asserted). If data is already
// readable (check elided in this extraction), the waiter fires immediately
// via the finisher to avoid racing an in-flight OSD reply.
1285 void Journaler::wait_for_readable(Context
*onreadable
)
1288 if (is_stopping()) {
1289 finisher
->queue(onreadable
, -EAGAIN
);
1293 ceph_assert(on_readable
== 0);
1295 ldout(cct
, 10) << "wait_for_readable at " << read_pos
<< " onreadable "
1296 << onreadable
<< dendl
;
1297 on_readable
= wrap_finisher(onreadable
);
1299 // race with OSD reply
1300 finisher
->queue(onreadable
, 0);
// True when a wait_for_readable() waiter is currently registered.
1304 bool Journaler::have_waiter() const
1306 return on_readable
!= nullptr;
1312 /***************** TRIMMING *******************/
// Completion for a trim purge: forwards the result and the trim target
// offset to _finish_trim().
1315 class Journaler::C_Trim
: public Context
{
1319 C_Trim(Journaler
*l
, int64_t t
) : ls(l
), to(t
) {}
1320 void finish(int r
) override
{
1321 ls
->_finish_trim(r
, to
);
// Public trim entry point; body elided in this extraction (presumably locks
// and calls _trim() — TODO confirm against full source).
1325 void Journaler::trim()
// Delete journal objects that are entirely before the committed expire
// position: compute trim_to as last_committed.expire_pos rounded down to a
// period boundary, skip if there is nothing new to trim or a trim is already
// in flight, then purge the object range [trimming_pos/period,
// trim_to/period) and advance trimming_pos.
1331 void Journaler::_trim()
1336 ceph_assert(!readonly
);
1337 uint64_t period
= get_layout_period();
1338 uint64_t trim_to
= last_committed
.expire_pos
;
1339 trim_to
-= trim_to
% period
;
1340 ldout(cct
, 10) << "trim last_commited head was " << last_committed
1341 << ", can trim to " << trim_to
1343 if (trim_to
== 0 || trim_to
== trimming_pos
) {
1344 ldout(cct
, 10) << "trim already trimmed/trimming to "
1345 << trimmed_pos
<< "/" << trimming_pos
<< dendl
;
1349 if (trimming_pos
> trimmed_pos
) {
1350 ldout(cct
, 10) << "trim already trimming atm, try again later. "
1351 "trimmed/trimming is " << trimmed_pos
<< "/" << trimming_pos
<< dendl
;
1356 ceph_assert(trim_to
<= write_pos
);
1357 ceph_assert(trim_to
<= expire_pos
);
1358 ceph_assert(trim_to
> trimming_pos
);
1359 ldout(cct
, 10) << "trim trimming to " << trim_to
1360 << ", trimmed/trimming/expire are "
1361 << trimmed_pos
<< "/" << trimming_pos
<< "/" << expire_pos
1364 // delete range of objects
1365 uint64_t first
= trimming_pos
/ period
;
1366 uint64_t num
= (trim_to
- trimming_pos
) / period
;
1368 filer
.purge_range(ino
, &layout
, snapc
, first
, num
,
1369 ceph::real_clock::now(), 0,
1370 wrap_finisher(new C_Trim(this, trim_to
)));
1371 trimming_pos
= trim_to
;
1374 void Journaler::_finish_trim(int r
, uint64_t to
)
1378 ceph_assert(!readonly
);
1379 ldout(cct
, 10) << "_finish_trim trimmed_pos was " << trimmed_pos
1380 << ", trimmed/trimming/expire now "
1381 << to
<< "/" << trimming_pos
<< "/" << expire_pos
1383 if (r
< 0 && r
!= -ENOENT
) {
1384 lderr(cct
) << "_finish_trim got " << cpp_strerror(r
) << dendl
;
1385 handle_write_error(r
);
1389 ceph_assert(r
>= 0 || r
== -ENOENT
);
1391 ceph_assert(to
<= trimming_pos
);
1392 ceph_assert(to
> trimmed_pos
);
1396 void Journaler::handle_write_error(int r
)
1400 lderr(cct
) << "handle_write_error " << cpp_strerror(r
) << dendl
;
1401 if (on_write_error
) {
1402 on_write_error
->complete(r
);
1403 on_write_error
= NULL
;
1404 called_write_error
= true;
1405 } else if (called_write_error
) {
1406 /* We don't call error handler more than once, subsequent errors
1407 * are dropped -- this is okay as long as the error handler does
1408 * something dramatic like respawn */
1409 lderr(cct
) << __func__
<< ": multiple write errors, handler already called"
1412 ceph_abort_msg("unhandled write error");
/*
 * Test whether the 'read_buf' byte stream has enough data to read
 * an entry.  If not, sets 'next_envelope_size' to the number of bytes
 * needed to advance (enough to get the next header if header was
 * unavailable, or enough to get the whole next entry if the header was
 * available but the body wasn't).
 */
1425 bool JournalStream::readable(bufferlist
&read_buf
, uint64_t *need
) const
1427 ceph_assert(need
!= NULL
);
1429 uint32_t entry_size
= 0;
1430 uint64_t entry_sentinel
= 0;
1431 auto p
= read_buf
.cbegin();
1433 // Do we have enough data to decode an entry prefix?
1434 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1435 *need
= sizeof(entry_size
) + sizeof(entry_sentinel
);
1437 *need
= sizeof(entry_size
);
1439 if (read_buf
.length() >= *need
) {
1440 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1441 decode(entry_sentinel
, p
);
1442 if (entry_sentinel
!= sentinel
) {
1443 throw buffer::malformed_input("Invalid sentinel");
1447 decode(entry_size
, p
);
1452 // Do we have enough data to decode an entry prefix, payload and suffix?
1453 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1454 *need
= JOURNAL_ENVELOPE_RESILIENT
+ entry_size
;
1456 *need
= JOURNAL_ENVELOPE_LEGACY
+ entry_size
;
1458 if (read_buf
.length() >= *need
) {
1459 return true; // No more bytes needed
/*
 * Consume one entry from a journal byte stream 'from', splicing a
 * serialized LogEvent blob into 'entry'.
 *
 * 'entry' must be non null and point to an empty bufferlist.
 *
 * 'from' must contain sufficient valid data (i.e. readable is true).
 *
 * 'start_ptr' will be set to the entry's start pointer, if the collection
 * format provides it.  It may not be null.
 *
 * @returns The number of bytes consumed from the `from` byte stream.  Note
 *          that this is not equal to the length of `entry`, which contains
 *          the inner serialized LogEvent and not the envelope.
 */
1481 size_t JournalStream::read(bufferlist
&from
, bufferlist
*entry
,
1482 uint64_t *start_ptr
)
1484 ceph_assert(start_ptr
!= NULL
);
1485 ceph_assert(entry
!= NULL
);
1486 ceph_assert(entry
->length() == 0);
1488 uint32_t entry_size
= 0;
1490 // Consume envelope prefix: entry_size and entry_sentinel
1491 auto from_ptr
= from
.cbegin();
1492 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1493 uint64_t entry_sentinel
= 0;
1494 decode(entry_sentinel
, from_ptr
);
1495 // Assertion instead of clean check because of precondition of this
1496 // fn is that readable() already passed
1497 ceph_assert(entry_sentinel
== sentinel
);
1499 decode(entry_size
, from_ptr
);
1501 // Read out the payload
1502 from_ptr
.copy(entry_size
, *entry
);
1504 // Consume the envelope suffix (start_ptr)
1505 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1506 decode(*start_ptr
, from_ptr
);
1511 // Trim the input buffer to discard the bytes we have consumed
1512 from
.splice(0, from_ptr
.get_off());
1514 return from_ptr
.get_off();
1521 size_t JournalStream::write(bufferlist
&entry
, bufferlist
*to
,
1522 uint64_t const &start_ptr
)
1524 ceph_assert(to
!= NULL
);
1526 uint32_t const entry_size
= entry
.length();
1527 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1528 encode(sentinel
, *to
);
1530 encode(entry_size
, *to
);
1531 to
->claim_append(entry
);
1532 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1533 encode(start_ptr
, *to
);
1536 if (format
>= JOURNAL_FORMAT_RESILIENT
) {
1537 return JOURNAL_ENVELOPE_RESILIENT
+ entry_size
;
1539 return JOURNAL_ENVELOPE_LEGACY
+ entry_size
;
/*
 * set write error callback
 *
 * Set a callback/context to trigger if we get a write error from
 * the objecter.  This may be from an explicit request (e.g., flush)
 * or something async the journaler did on its own (e.g., journal
 * header update).
 *
 * It is only used once; if the caller continues to use the
 * Journaler and wants to hear about errors, it needs to reset the
 * error handler.
 *
 * @param c callback/context to trigger on error
 */
1557 void Journaler::set_write_error_handler(Context
*c
) {
1559 ceph_assert(!on_write_error
);
1560 on_write_error
= wrap_finisher(c
);
1561 called_write_error
= false;
/*
 * Wrap a context in a C_OnFinisher, if it is non-NULL
 *
 * Utility function to avoid lots of error-prone and verbose
 * NULL checking on contexts passed in.
 */
1571 C_OnFinisher
*Journaler::wrap_finisher(Context
*c
)
1574 return new C_OnFinisher(c
, finisher
);
1580 void Journaler::shutdown()
1584 ldout(cct
, 1) << __func__
<< dendl
;
1586 state
= STATE_STOPPING
;
1589 // Kick out anyone reading from journal
1592 C_OnFinisher
*f
= on_readable
;
1594 f
->complete(-EAGAIN
);
1598 ls
.swap(waitfor_recover
);
1599 finish_contexts(cct
, ls
, -ESHUTDOWN
);
1601 std::map
<uint64_t, std::list
<Context
*> >::iterator i
;
1602 for (i
= waitfor_safe
.begin(); i
!= waitfor_safe
.end(); ++i
) {
1603 finish_contexts(cct
, i
->second
, -EAGAIN
);
1605 waitfor_safe
.clear();