]> git.proxmox.com Git - ceph.git/blob - ceph/src/osdc/Journaler.cc
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / osdc / Journaler.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "common/perf_counters.h"
16 #include "common/dout.h"
17 #include "include/Context.h"
18 #include "msg/Messenger.h"
19 #include "osdc/Journaler.h"
20 #include "common/errno.h"
21 #include "include/ceph_assert.h"
22 #include "common/Finisher.h"
23
24 #define dout_subsys ceph_subsys_journaler
25 #undef dout_prefix
26 #define dout_prefix *_dout << objecter->messenger->get_myname() \
27 << ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
28
29 using std::chrono::seconds;
30
31
// Timer callback used to defer a flush: when it fires, it asks the
// Journaler to perform the flush it postponed earlier.
class Journaler::C_DelayFlush : public Context {
  Journaler *journaler;
public:
  explicit C_DelayFlush(Journaler *j) : journaler(j) {}
  void finish(int r) override {
    // the completion code is ignored; the delayed flush always runs
    journaler->_do_delayed_flush();
  }
};
40
41 void Journaler::set_readonly()
42 {
43 lock_guard l(lock);
44
45 ldout(cct, 1) << "set_readonly" << dendl;
46 readonly = true;
47 }
48
49 void Journaler::set_writeable()
50 {
51 lock_guard l(lock);
52
53 ldout(cct, 1) << "set_writeable" << dendl;
54 readonly = false;
55 }
56
// Initialize a brand-new (empty) journal: mark it active, record the
// stream format and layout, and start every position pointer at the
// end of the first period (object 0 holds the header, so data begins
// one full period in).
void Journaler::create(file_layout_t *l, stream_format_t const sf)
{
  lock_guard lk(lock);

  ceph_assert(!readonly);
  state = STATE_ACTIVE;

  stream_format = sf;
  journal_stream.set_format(sf);
  _set_layout(l);

  // every read/write/trim pointer starts at the same place
  prezeroing_pos = prezero_pos = write_pos = flush_pos =
    safe_pos = read_pos = requested_pos = received_pos =
    expire_pos = trimming_pos = trimmed_pos =
    next_safe_pos = layout.get_period();

  ldout(cct, 1) << "created blank journal at inode 0x" << std::hex << ino
		<< std::dec << ", format=" << stream_format << dendl;
}
76
77 void Journaler::set_layout(file_layout_t const *l)
78 {
79 lock_guard lk(lock);
80 _set_layout(l);
81 }
82
// Adopt a new file layout (caller holds the lock). The layout's pool
// must match the pool this Journaler was constructed with; a mismatch
// (e.g. a stale pool id in an edited header) is treated as fatal.
// Also recomputes fetch_len, the read-ahead window used by _prefetch().
void Journaler::_set_layout(file_layout_t const *l)
{
  layout = *l;

  if (layout.pool_id != pg_pool) {
    // user can reset pool id through cephfs-journal-tool
    lderr(cct) << "may got older pool id from header layout" << dendl;
    ceph_abort();
  }
  last_written.layout = layout;
  last_committed.layout = layout;

  // prefetch intelligently.
  // (watch out, this is big if you use big objects or weird striping)
  uint64_t periods = cct->_conf.get_val<uint64_t>("journaler_prefetch_periods");
  fetch_len = layout.get_period() * periods;
}
100
101
102 /***************** HEADER *******************/
103
104 ostream& operator<<(ostream &out, const Journaler::Header &h)
105 {
106 return out << "loghead(trim " << h.trimmed_pos
107 << ", expire " << h.expire_pos
108 << ", write " << h.write_pos
109 << ", stream_format " << (int)(h.stream_format)
110 << ")";
111 }
112
// Completion for the initial header read during recovery; carries the
// buffer the read fills and hands it to _finish_read_head().
class Journaler::C_ReadHead : public Context {
  Journaler *ls;
public:
  bufferlist bl;  // filled by the objecter read before finish() runs
  explicit C_ReadHead(Journaler *l) : ls(l) {}
  void finish(int r) override {
    ls->_finish_read_head(r, bl);
  }
};
122
// Completion for a header re-read (STATE_REREADHEAD); forwards the
// read buffer plus the caller's continuation to _finish_reread_head().
class Journaler::C_RereadHead : public Context {
  Journaler *ls;
  Context *onfinish;  // caller's continuation, completed by the finish handler
public:
  bufferlist bl;      // filled by the objecter read before finish() runs
  C_RereadHead(Journaler *l, Context *onfinish_) : ls (l),
                                                   onfinish(onfinish_) {}
  void finish(int r) override {
    ls->_finish_reread_head(r, bl, onfinish);
  }
};
134
// Completion for the end-of-log probe during recovery. 'end' starts at
// (uint64_t)-1, the sentinel _finish_probe_end checks for "probe found
// nothing"; the filer overwrites it with the detected end offset.
class Journaler::C_ProbeEnd : public Context {
  Journaler *ls;
public:
  uint64_t end;
  explicit C_ProbeEnd(Journaler *l) : ls(l), end(-1) {}
  void finish(int r) override {
    ls->_finish_probe_end(r, end);
  }
};
144
// Completion for a re-probe of the log end while ACTIVE; forwards the
// detected end and the caller's finisher to _finish_reprobe().
class Journaler::C_ReProbe : public Context {
  Journaler *ls;
  C_OnFinisher *onfinish;  // caller's continuation
public:
  uint64_t end;            // filled by the probe before finish() runs
  C_ReProbe(Journaler *l, C_OnFinisher *onfinish_) :
    ls(l), onfinish(onfinish_), end(0) {}
  void finish(int r) override {
    ls->_finish_reprobe(r, end, onfinish);
  }
};
156
157 void Journaler::recover(Context *onread)
158 {
159 lock_guard l(lock);
160 if (is_stopping()) {
161 onread->complete(-EAGAIN);
162 return;
163 }
164
165 ldout(cct, 1) << "recover start" << dendl;
166 ceph_assert(state != STATE_ACTIVE);
167 ceph_assert(readonly);
168
169 if (onread)
170 waitfor_recover.push_back(wrap_finisher(onread));
171
172 if (state != STATE_UNDEF) {
173 ldout(cct, 1) << "recover - already recovering" << dendl;
174 return;
175 }
176
177 ldout(cct, 1) << "read_head" << dendl;
178 state = STATE_READHEAD;
179 C_ReadHead *fin = new C_ReadHead(this);
180 _read_head(fin, &fin->bl);
181 }
182
183 void Journaler::_read_head(Context *on_finish, bufferlist *bl)
184 {
185 // lock is locked
186 ceph_assert(state == STATE_READHEAD || state == STATE_REREADHEAD);
187
188 object_t oid = file_object_t(ino, 0);
189 object_locator_t oloc(pg_pool);
190 objecter->read_full(oid, oloc, CEPH_NOSNAP, bl, 0, wrap_finisher(on_finish));
191 }
192
193 void Journaler::reread_head(Context *onfinish)
194 {
195 lock_guard l(lock);
196 _reread_head(wrap_finisher(onfinish));
197 }
198
199 /**
200 * Re-read the head from disk, and set the write_pos, expire_pos, trimmed_pos
201 * from the on-disk header. This switches the state to STATE_REREADHEAD for
202 * the duration, and you shouldn't start a re-read while other operations are
203 * in-flight, nor start other operations while a re-read is in progress.
204 * Also, don't call this until the Journaler has finished its recovery and has
205 * gone STATE_ACTIVE!
206 */
207 void Journaler::_reread_head(Context *onfinish)
208 {
209 ldout(cct, 10) << "reread_head" << dendl;
210 ceph_assert(state == STATE_ACTIVE);
211
212 state = STATE_REREADHEAD;
213 C_RereadHead *fin = new C_RereadHead(this, onfinish);
214 _read_head(fin, &fin->bl);
215 }
216
// Completion for _reread_head(): decode the freshly read header,
// collapse all write-side pointers to its write_pos and adopt its
// expire/trim pointers, then return to STATE_ACTIVE and run the
// caller's continuation with the result code.
void Journaler::_finish_reread_head(int r, bufferlist& bl, Context *finish)
{
  lock_guard l(lock);
  if (is_stopping()) {
    finish->complete(-EAGAIN);
    return;
  }

  // we must have either read header bytes or hit an error
  ceph_assert(bl.length() || r < 0 );

  // unpack header
  if (r == 0) {
    Header h;
    auto p = bl.cbegin();
    try {
      decode(h, p);
    } catch (const buffer::error &e) {
      finish->complete(-EINVAL);
      return;
    }
    // any in-memory write progress beyond the on-disk write_pos is
    // discarded; all write pointers restart from the header
    prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
      = h.write_pos;
    expire_pos = h.expire_pos;
    trimmed_pos = trimming_pos = h.trimmed_pos;
    init_headers(h);
    state = STATE_ACTIVE;
  }

  finish->complete(r);
}
248
// Completion for the initial header read during recovery. Validates
// and decodes the header, installs its pointers and layout, then kicks
// off a probe for the actual end of the log (the on-disk write_pos may
// lag the true end, since the header is written lazily). On any
// failure, completes all queued recovery waiters with an error.
void Journaler::_finish_read_head(int r, bufferlist& bl)
{
  lock_guard l(lock);
  if (is_stopping())
    return;

  ceph_assert(state == STATE_READHEAD);

  if (r!=0) {
    ldout(cct, 0) << "error getting journal off disk" << dendl;
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, r);
    return;
  }

  if (bl.length() == 0) {
    // no header object at all: treat as a freshly created, empty log
    ldout(cct, 1) << "_finish_read_head r=" << r
		  << " read 0 bytes, assuming empty log" << dendl;
    state = STATE_ACTIVE;
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, 0);
    return;
  }

  // unpack header
  bool corrupt = false;
  Header h;
  auto p = bl.cbegin();
  try {
    decode(h, p);

    if (h.magic != magic) {
      ldout(cct, 0) << "on disk magic '" << h.magic << "' != my magic '"
		    << magic << "'" << dendl;
      corrupt = true;
    } else if (h.write_pos < h.expire_pos || h.expire_pos < h.trimmed_pos) {
      // pointers must satisfy trimmed <= expire <= write
      ldout(cct, 0) << "Corrupt header (bad offsets): " << h << dendl;
      corrupt = true;
    }
  } catch (const buffer::error &e) {
    corrupt = true;
  }

  if (corrupt) {
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, -EINVAL);
    return;
  }

  // adopt the header's view: write-side pointers at its write_pos,
  // read-side pointers at its expire_pos
  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
    = h.write_pos;
  read_pos = requested_pos = received_pos = expire_pos = h.expire_pos;
  trimmed_pos = trimming_pos = h.trimmed_pos;

  init_headers(h);
  _set_layout(&h.layout);
  stream_format = h.stream_format;
  journal_stream.set_format(h.stream_format);

  ldout(cct, 1) << "_finish_read_head " << h
		<< ". probing for end of log (from " << write_pos << ")..."
		<< dendl;
  C_ProbeEnd *fin = new C_ProbeEnd(this);
  state = STATE_PROBING;
  _probe(fin, &fin->end);
}
318
319 void Journaler::_probe(Context *finish, uint64_t *end)
320 {
321 // lock is locked
322 ldout(cct, 1) << "probing for end of the log" << dendl;
323 ceph_assert(state == STATE_PROBING || state == STATE_REPROBING);
324 // probe the log
325 filer.probe(ino, &layout, CEPH_NOSNAP,
326 write_pos, end, true, 0, wrap_finisher(finish));
327 }
328
329 void Journaler::_reprobe(C_OnFinisher *finish)
330 {
331 ldout(cct, 10) << "reprobe" << dendl;
332 ceph_assert(state == STATE_ACTIVE);
333
334 state = STATE_REPROBING;
335 C_ReProbe *fin = new C_ReProbe(this, finish);
336 _probe(fin, &fin->end);
337 }
338
339
// Completion for _reprobe(): adopt the newly detected log end as the
// write position (all write-side pointers collapse to it), go back to
// STATE_ACTIVE, and complete the caller with the probe's result code.
void Journaler::_finish_reprobe(int r, uint64_t new_end,
                                C_OnFinisher *onfinish)
{
  lock_guard l(lock);
  if (is_stopping()) {
    onfinish->complete(-EAGAIN);
    return;
  }

  // the probe can only find data at or beyond our current write_pos
  ceph_assert(new_end >= write_pos || r < 0);
  ldout(cct, 1) << "_finish_reprobe new_end = " << new_end
	  << " (header had " << write_pos << ")."
	  << dendl;
  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = new_end;
  state = STATE_ACTIVE;
  onfinish->complete(r);
}
357
// Completion of the end-of-log probe during recovery. On success,
// adopt the probed end as the write position and go ACTIVE; in every
// case, complete all queued recovery waiters with the result code.
void Journaler::_finish_probe_end(int r, uint64_t end)
{
  lock_guard l(lock);
  if (is_stopping())
    return;

  ceph_assert(state == STATE_PROBING);
  if (r < 0) { // error in probing
    goto out;
  }
  if (((int64_t)end) == -1) {
    // C_ProbeEnd initializes end to -1: the probe found no data at
    // all, which should be impossible for a journal with a header
    end = write_pos;
    ldout(cct, 1) << "_finish_probe_end write_pos = " << end << " (header had "
		  << write_pos << "). log was empty. recovered." << dendl;
    ceph_abort(); // hrm.
  } else {
    // probed end can only be at or beyond the header's write_pos
    ceph_assert(end >= write_pos);
    ldout(cct, 1) << "_finish_probe_end write_pos = " << end
		  << " (header had " << write_pos << "). recovered."
		  << dendl;
  }

  state = STATE_ACTIVE;

  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = end;

 out:
  // done.
  list<Context*> ls;
  ls.swap(waitfor_recover);
  finish_contexts(cct, ls, r);
}
390
// Chains a header re-read into a re-probe: once the head is re-read,
// _finish_reread_head_and_probe() issues the probe and eventually
// completes 'final_finish'.
class Journaler::C_RereadHeadProbe : public Context
{
  Journaler *ls;
  C_OnFinisher *final_finish;  // completed after the subsequent re-probe
public:
  C_RereadHeadProbe(Journaler *l, C_OnFinisher *finish) :
    ls(l), final_finish(finish) {}
  void finish(int r) override {
    ls->_finish_reread_head_and_probe(r, final_finish);
  }
};
402
403 void Journaler::reread_head_and_probe(Context *onfinish)
404 {
405 lock_guard l(lock);
406
407 ceph_assert(state == STATE_ACTIVE);
408 _reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish)));
409 }
410
// Second stage of reread_head_and_probe(): runs after the header
// re-read completes, and issues the re-probe that will finally
// complete 'onfinish'.
void Journaler::_finish_reread_head_and_probe(int r, C_OnFinisher *onfinish)
{
  // Expect to be called back from finish_reread_head, which already takes lock
  // lock is locked
  if (is_stopping()) {
    onfinish->complete(-EAGAIN);
    return;
  }

  ceph_assert(!r); //if we get an error, we're boned
  _reprobe(onfinish);
}
423
424
425 // WRITING
426
// Completion for a header write: carries a copy of the header that was
// written so _finish_write_head() can record it as last_committed.
class Journaler::C_WriteHead : public Context {
public:
  Journaler *ls;
  Header h;                // snapshot of the header as written
  C_OnFinisher *oncommit;  // caller's commit callback (may be null)
  C_WriteHead(Journaler *l, Header& h_, C_OnFinisher *c) : ls(l), h(h_),
                                                           oncommit(c) {}
  void finish(int r) override {
    ls->_finish_write_head(r, h, oncommit);
  }
};
438
439 void Journaler::write_head(Context *oncommit)
440 {
441 lock_guard l(lock);
442 _write_head(oncommit);
443 }
444
445
// Persist the current journal header to the header object (caller
// holds the lock). 'oncommit' (may be null) fires once the write is
// stable on the OSDs.
void Journaler::_write_head(Context *oncommit)
{
  ceph_assert(!readonly);
  ceph_assert(state == STATE_ACTIVE);
  last_written.trimmed_pos = trimmed_pos;
  last_written.expire_pos = expire_pos;
  last_written.unused_field = expire_pos;
  // advertise safe_pos, not write_pos: only data already acked by the
  // OSDs may be recorded in the on-disk header
  last_written.write_pos = safe_pos;
  last_written.stream_format = stream_format;
  ldout(cct, 10) << "write_head " << last_written << dendl;

  // Avoid persisting bad pointers in case of bugs
  ceph_assert(last_written.write_pos >= last_written.expire_pos);
  ceph_assert(last_written.expire_pos >= last_written.trimmed_pos);

  last_wrote_head = ceph::real_clock::now();

  bufferlist bl;
  encode(last_written, bl);
  SnapContext snapc;

  object_t oid = file_object_t(ino, 0);
  object_locator_t oloc(pg_pool);
  objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0,
		       wrap_finisher(new C_WriteHead(
				       this, last_written,
				       wrap_finisher(oncommit))),
		       0, 0, write_iohint);
}
475
// Completion for _write_head(): on success record the header that is
// now durable as last_committed, run the caller's commit callback, and
// opportunistically trim; on error, enter the write-error path.
void Journaler::_finish_write_head(int r, Header &wrote,
                                   C_OnFinisher *oncommit)
{
  lock_guard l(lock);

  if (r < 0) {
    lderr(cct) << "_finish_write_head got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }
  ceph_assert(!readonly);
  ldout(cct, 10) << "_finish_write_head " << wrote << dendl;
  last_committed = wrote;
  if (oncommit) {
    oncommit->complete(r);
  }

  _trim();  // trim?
}
495
496
497 /***************** WRITING *******************/
498
499 class Journaler::C_Flush : public Context {
500 Journaler *ls;
501 uint64_t start;
502 ceph::real_time stamp;
503 public:
504 C_Flush(Journaler *l, int64_t s, ceph::real_time st)
505 : ls(l), start(s), stamp(st) {}
506 void finish(int r) override {
507 ls->_finish_flush(r, start, stamp);
508 }
509 };
510
// Completion for one journal data write. Flush acks may arrive out of
// order, so safe_pos only advances to the smallest still-pending
// boundary (or all the way to next_safe_pos once nothing is pending).
// Then any waiters at or below the new safe_pos are completed.
void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp)
{
  lock_guard l(lock);
  ceph_assert(!readonly);

  if (r < 0) {
    lderr(cct) << "_finish_flush got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  ceph_assert(start < flush_pos);

  // calc latency?
  if (logger) {
    ceph::timespan lat = ceph::real_clock::now() - stamp;
    logger->tinc(logger_key_lat, lat);
  }

  // adjust safe_pos
  auto it = pending_safe.find(start);
  ceph_assert(it != pending_safe.end());
  // capture the earliest pending boundary before erasing our entry
  uint64_t min_next_safe_pos = pending_safe.begin()->second;
  pending_safe.erase(it);
  if (pending_safe.empty())
    safe_pos = next_safe_pos;
  else
    safe_pos = min_next_safe_pos;

  ldout(cct, 10) << "_finish_flush safe from " << start
		 << ", pending_safe " << pending_safe
		 << ", (prezeroing/prezero)/write/flush/safe positions now "
		 << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
		 << write_pos << "/" << flush_pos << "/" << safe_pos
		 << dendl;

  // kick waiters <= safe_pos
  if (!waitfor_safe.empty()) {
    list<Context*> ls;
    while (!waitfor_safe.empty()) {
      auto it = waitfor_safe.begin();
      if (it->first > safe_pos)
	break;
      ls.splice(ls.end(), it->second);
      waitfor_safe.erase(it);
    }
    finish_contexts(cct, ls);
  }
}
560
561
562
// Append one entry (envelope + payload) to the in-memory write buffer,
// blocking on the write-buffer throttle if it is too full, and flush
// any objects this entry completed. Returns the new write_pos (the end
// of this entry).
uint64_t Journaler::append_entry(bufferlist& bl)
{
  unique_lock l(lock);

  ceph_assert(!readonly);
  uint32_t s = bl.length();

  // append
  size_t delta = bl.length() + journal_stream.get_envelope_size();
  // write_buf space is nearly full
  if (!write_buf_throttle.get_or_fail(delta)) {
    // drop the lock while blocking on the throttle so in-flight
    // flushes can complete and return budget
    l.unlock();
    ldout(cct, 10) << "write_buf_throttle wait, delta " << delta << dendl;
    write_buf_throttle.get(delta);
    l.lock();
  }
  ldout(cct, 20) << "write_buf_throttle get, delta " << delta << dendl;
  size_t wrote = journal_stream.write(bl, &write_buf, write_pos);
  ldout(cct, 10) << "append_entry len " << s << " to " << write_pos << "~"
		 << wrote << dendl;
  write_pos += wrote;

  // flush previous object?
  uint64_t su = get_layout_period();
  ceph_assert(su > 0);
  uint64_t write_off = write_pos % su;
  uint64_t write_obj = write_pos / su;
  uint64_t flush_obj = flush_pos / su;
  if (write_obj != flush_obj) {
    // this entry crossed an object boundary: flush everything up to
    // the start of the object write_pos now lives in
    ldout(cct, 10) << " flushing completed object(s) (su " << su << " wro "
		   << write_obj << " flo " << flush_obj << ")" << dendl;
    _do_flush(write_buf.length() - write_off);

    // if _do_flush() skips flushing some data, it does do a best effort to
    // update next_safe_pos.
    if (write_buf.length() > 0 &&
	write_buf.length() <= wrote) { // the unflushed data are within this entry
      // set next_safe_pos to end of previous entry
      next_safe_pos = write_pos - wrote;
    }
  }

  return write_pos;
}
607
608
// Submit up to 'amount' bytes (0 = everything) of the in-memory write
// buffer to the OSDs. May flush less than requested — or nothing — if
// the target range is not yet prezeroed, in which case the flush is
// deferred until _finish_prezero() catches up (waiting_for_zero_pos).
void Journaler::_do_flush(unsigned amount)
{
  if (is_stopping())
    return;
  if (write_pos == flush_pos)
    return;
  ceph_assert(write_pos > flush_pos);
  ceph_assert(!readonly);

  // flush
  uint64_t len = write_pos - flush_pos;
  ceph_assert(len == write_buf.length());
  if (amount && amount < len)
    len = amount;

  // zero at least two full periods ahead.  this ensures
  // that the next object will not exist.
  uint64_t period = get_layout_period();
  if (flush_pos + len + 2*period > prezero_pos) {
    _issue_prezero();

    // only flush into ranges that are at least one period behind the
    // prezeroed frontier
    int64_t newlen = prezero_pos - flush_pos - period;
    if (newlen <= 0) {
      ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
		     << " already too close to prezero_pos " << prezero_pos
		     << ", zeroing first" << dendl;
      waiting_for_zero_pos = flush_pos + len;
      return;
    }
    if (static_cast<uint64_t>(newlen) < len) {
      ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
		     << " but hit prezero_pos " << prezero_pos
		     << ", will do " << flush_pos << "~" << newlen << dendl;
      waiting_for_zero_pos = flush_pos + len;
      len = newlen;
    }
  }
  ldout(cct, 10) << "_do_flush flushing " << flush_pos << "~" << len << dendl;

  // submit write for anything pending
  // flush _start_ pos to _finish_flush
  ceph::real_time now = ceph::real_clock::now();
  SnapContext snapc;

  Context *onsafe = new C_Flush(this, flush_pos, now);  // on COMMIT
  pending_safe[flush_pos] = next_safe_pos;

  bufferlist write_bl;

  // adjust pointers
  if (len == write_buf.length()) {
    // flushing everything: the whole buffer becomes the write payload
    write_bl.swap(write_buf);
    next_safe_pos = write_pos;
  } else {
    // partial flush: carve off the front of the buffer
    write_buf.splice(0, len, &write_bl);
    // Keys of waitfor_safe map are journal entry boundaries.
    // Try finding a journal entry that we are actually flushing
    // and set next_safe_pos to end of it. This is best effort.
    // The one we found may not be the lastest flushing entry.
    auto p = waitfor_safe.lower_bound(flush_pos + len);
    if (p != waitfor_safe.end()) {
      if (p->first > flush_pos + len && p != waitfor_safe.begin())
	--p;
      if (p->first <= flush_pos + len && p->first > next_safe_pos)
	next_safe_pos = p->first;
    }
  }

  filer.write(ino, &layout, snapc,
	      flush_pos, len, write_bl, ceph::real_clock::now(),
	      0,
	      wrap_finisher(onsafe), write_iohint);

  flush_pos += len;
  ceph_assert(write_buf.length() == write_pos - flush_pos);
  write_buf_throttle.put(len);
  ldout(cct, 20) << "write_buf_throttle put, len " << len << dendl;

  ldout(cct, 10)
    << "_do_flush (prezeroing/prezero)/write/flush/safe pointers now at "
    << "(" << prezeroing_pos << "/" << prezero_pos << ")/" << write_pos
    << "/" << flush_pos << "/" << safe_pos << dendl;

  _issue_prezero();
}
694
695
696 void Journaler::wait_for_flush(Context *onsafe)
697 {
698 lock_guard l(lock);
699 if (is_stopping()) {
700 onsafe->complete(-EAGAIN);
701 return;
702 }
703 _wait_for_flush(onsafe);
704 }
705
// Queue 'onsafe' (may be null) to run once all data written so far is
// safe; completes immediately via the finisher if nothing is in
// flight. Caller holds the lock.
void Journaler::_wait_for_flush(Context *onsafe)
{
  ceph_assert(!readonly);

  // all flushed and safe?
  if (write_pos == safe_pos) {
    ceph_assert(write_buf.length() == 0);
    ldout(cct, 10)
      << "flush nothing to flush, (prezeroing/prezero)/write/flush/safe "
      "pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
      << write_pos << "/" << flush_pos << "/" << safe_pos << dendl;
    if (onsafe) {
      // queue rather than call: never run callbacks under our lock
      finisher->queue(onsafe, 0);
    }
    return;
  }

  // queue waiter
  if (onsafe) {
    waitfor_safe[write_pos].push_back(wrap_finisher(onsafe));
  }
}
728
729 void Journaler::flush(Context *onsafe)
730 {
731 lock_guard l(lock);
732 if (is_stopping()) {
733 onsafe->complete(-EAGAIN);
734 return;
735 }
736 _flush(wrap_finisher(onsafe));
737 }
738
// Flush any buffered data and register 'onsafe' (may be null) to fire
// when it is safe; also rewrites the header if it is due. Caller holds
// the lock.
void Journaler::_flush(C_OnFinisher *onsafe)
{
  ceph_assert(!readonly);

  if (write_pos == flush_pos) {
    // nothing buffered; complete the caller right away
    ceph_assert(write_buf.length() == 0);
    ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/"
      "flush/safe pointers at " << "(" << prezeroing_pos << "/" << prezero_pos
		   << ")/" << write_pos << "/" << flush_pos << "/" << safe_pos
		   << dendl;
    if (onsafe) {
      onsafe->complete(0);
    }
  } else {
    _do_flush();
    _wait_for_flush(onsafe);
  }

  // write head?
  if (_write_head_needed()) {
    _write_head();
  }
}
762
763 bool Journaler::_write_head_needed()
764 {
765 return last_wrote_head + seconds(cct->_conf.get_val<int64_t>("journaler_write_head_interval"))
766 < ceph::real_clock::now();
767 }
768
769
770 /*************** prezeroing ******************/
771
// Completion for one prezero (filer.zero) request; reports the zeroed
// range back to _finish_prezero().
struct C_Journaler_Prezero : public Context {
  Journaler *journaler;
  uint64_t from, len;  // the range that was zeroed
  C_Journaler_Prezero(Journaler *j, uint64_t f, uint64_t l)
    : journaler(j), from(f), len(l) {}
  void finish(int r) override {
    journaler->_finish_prezero(r, from, len);
  }
};
781
// Issue zero requests ahead of the write position so that future
// flushes land in known-zeroed space. Advances prezeroing_pos, one
// period (or partial period, to realign) per request; completions
// advance prezero_pos via _finish_prezero().
void Journaler::_issue_prezero()
{
  ceph_assert(prezeroing_pos >= flush_pos);

  uint64_t num_periods = cct->_conf.get_val<uint64_t>("journaler_prezero_periods");
  /*
   * issue zero requests based on write_pos, even though the invariant
   * is that we zero ahead of flush_pos.
   */
  uint64_t period = get_layout_period();
  // round the target up to a period boundary
  uint64_t to = write_pos + period * num_periods + period - 1;
  to -= to % period;

  if (prezeroing_pos >= to) {
    ldout(cct, 20) << "_issue_prezero target " << to << " <= prezeroing_pos "
		   << prezeroing_pos << dendl;
    return;
  }

  while (prezeroing_pos < to) {
    uint64_t len;
    if (prezeroing_pos % period == 0) {
      len = period;
      ldout(cct, 10) << "_issue_prezero removing " << prezeroing_pos << "~"
		     << period << " (full period)" << dendl;
    } else {
      // partial request to realign prezeroing_pos to a period boundary
      len = period - (prezeroing_pos % period);
      ldout(cct, 10) << "_issue_prezero zeroing " << prezeroing_pos << "~"
		     << len << " (partial period)" << dendl;
    }
    SnapContext snapc;
    Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos,
						       len));
    filer.zero(ino, &layout, snapc, prezeroing_pos, len,
	       ceph::real_clock::now(), 0, c);
    prezeroing_pos += len;
  }
}
820
821 // Lock cycle because we get called out of objecter callback (holding
822 // objecter read lock), but there are also cases where we take the journaler
823 // lock before calling into objecter to do I/O.
// Completion for one zero request. Zeroed ranges can complete out of
// order: prezero_pos only advances when the completed range is the
// next contiguous one; otherwise the range is parked in pending_zero
// until the gap before it completes. May also resume a flush that was
// deferred waiting for zeroed space.
void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len)
{
  lock_guard l(lock);

  ldout(cct, 10) << "_prezeroed to " << start << "~" << len
		 << ", prezeroing/prezero was " << prezeroing_pos << "/"
		 << prezero_pos << ", pending " << pending_zero
		 << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "_prezeroed got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  // -ENOENT is fine: zeroing an object that does not exist is a no-op
  ceph_assert(r == 0 || r == -ENOENT);

  if (start == prezero_pos) {
    prezero_pos += len;
    // absorb any parked ranges that are now contiguous
    while (!pending_zero.empty() &&
	   pending_zero.begin().get_start() == prezero_pos) {
      interval_set<uint64_t>::iterator b(pending_zero.begin());
      prezero_pos += b.get_len();
      pending_zero.erase(b);
    }

    // a flush may have been deferred waiting for this zeroing
    if (waiting_for_zero_pos > flush_pos) {
      _do_flush(waiting_for_zero_pos - flush_pos);
    }

    if (prezero_pos == prezeroing_pos &&
	!waitfor_prezero.empty()) {
      list<Context*> ls;
      ls.swap(waitfor_prezero);
      finish_contexts(cct, ls, 0);
    }
  } else {
    // out-of-order completion; remember it for later
    pending_zero.insert(start, len);
  }
  ldout(cct, 10) << "_prezeroed prezeroing/prezero now " << prezeroing_pos
		 << "/" << prezero_pos
		 << ", pending " << pending_zero
		 << dendl;
}
867
868 void Journaler::wait_for_prezero(Context *onfinish)
869 {
870 ceph_assert(onfinish);
871 lock_guard l(lock);
872
873 if (prezero_pos == prezeroing_pos) {
874 finisher->queue(onfinish, 0);
875 return;
876 }
877 waitfor_prezero.push_back(wrap_finisher(onfinish));
878 }
879
880
881 /***************** READING *******************/
882
883
// Completion for one journal data read; carries the buffer the read
// fills plus the offset/length requested, for _finish_read() to verify
// and stitch into the prefetch map.
class Journaler::C_Read : public Context {
  Journaler *ls;
  uint64_t offset;  // journal offset that was requested
  uint64_t length;  // bytes requested (used to detect short reads)
public:
  bufferlist bl;    // filled by the filer read before finish() runs
  C_Read(Journaler *j, uint64_t o, uint64_t l) : ls(j), offset(o), length(l) {}
  void finish(int r) override {
    ls->_finish_read(r, offset, length, bl);
  }
};
895
// Queued on waitfor_safe when a read is blocked at safe_pos; once the
// data becomes safe, re-kicks the prefetch machinery.
class Journaler::C_RetryRead : public Context {
  Journaler *ls;
public:
  explicit C_RetryRead(Journaler *l) : ls(l) {}

  void finish(int r) override {
    // Should only be called from waitfor_safe i.e. already inside lock
    // (ls->lock is locked
    ls->_prefetch();
  }
};
907
// Completion for one journal data read: validate the result (a short
// read is an error), stash the buffer in prefetch_buf keyed by offset,
// assimilate any now-contiguous data into read_buf, and keep
// prefetching. On error, fail the pending on_readable callback.
void Journaler::_finish_read(int r, uint64_t offset, uint64_t length,
                             bufferlist& bl)
{
  lock_guard l(lock);

  if (r < 0) {
    ldout(cct, 0) << "_finish_read got error " << r << dendl;
    error = r;
  } else {
    ldout(cct, 10) << "_finish_read got " << offset << "~" << bl.length()
		   << dendl;
    if (bl.length() < length) {
      // short read: the journal should have data for the whole range
      ldout(cct, 0) << "_finish_read got less than expected (" << length << ")"
		    << dendl;
      error = -EINVAL;
    }
  }

  if (error) {
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(error);
    }
    return;
  }

  // reads may complete out of order; index by offset and let
  // _assimilate_prefetch() stitch contiguous pieces together
  prefetch_buf[offset].swap(bl);

  try {
    _assimilate_prefetch();
  } catch (const buffer::error &err) {
    lderr(cct) << "_decode error from assimilate_prefetch" << dendl;
    error = -EINVAL;
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(error);
    }
    return;
  }
  _prefetch();
}
951
// Move contiguous buffers from prefetch_buf into read_buf, advancing
// received_pos; stop at the first gap (an out-of-order read that has
// not completed yet). Updates readability and fires the on_readable
// callback when the journal becomes readable (or we are at its end).
void Journaler::_assimilate_prefetch()
{
  bool was_readable = readable;

  bool got_any = false;
  while (!prefetch_buf.empty()) {
    map<uint64_t,bufferlist>::iterator p = prefetch_buf.begin();
    if (p->first != received_pos) {
      // gap: the read covering [received_pos, p->first) is still in flight
      uint64_t gap = p->first - received_pos;
      ldout(cct, 10) << "_assimilate_prefetch gap of " << gap
		     << " from received_pos " << received_pos
		     << " to first prefetched buffer " << p->first << dendl;
      break;
    }

    ldout(cct, 10) << "_assimilate_prefetch " << p->first << "~"
		   << p->second.length() << dendl;
    received_pos += p->second.length();
    read_buf.claim_append(p->second);
    ceph_assert(received_pos <= requested_pos);
    prefetch_buf.erase(p);
    got_any = true;
  }

  if (got_any) {
    ldout(cct, 10) << "_assimilate_prefetch read_buf now " << read_pos << "~"
		   << read_buf.length() << ", read pointers read_pos=" << read_pos
		   << " received_pos=" << received_pos << " requested_pos=" << requested_pos
		   << dendl;

    // Update readability (this will also hit any decode errors resulting
    // from bad data)
    readable = _is_readable();
  }

  if ((got_any && !was_readable && readable) || read_pos == write_pos) {
    // readable!
    ldout(cct, 10) << "_finish_read now readable (or at journal end) readable="
		   << readable << " read_pos=" << read_pos << " write_pos="
		   << write_pos << dendl;
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(0);
    }
  }
}
999
// Issue read requests for up to 'len' bytes past requested_pos, capped
// at safe_pos. If we are already stuck at safe_pos (reading the tail
// of a journal we are also writing), kick a flush and queue a
// C_RetryRead to resume once the data becomes safe.
void Journaler::_issue_read(uint64_t len)
{
  // stuck at safe_pos? (this is needed if we are reading the tail of
  // a journal we are also writing to)
  ceph_assert(requested_pos <= safe_pos);
  if (requested_pos == safe_pos) {
    ldout(cct, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos
		   << ", waiting" << dendl;
    ceph_assert(write_pos > requested_pos);
    if (pending_safe.empty()) {
      _flush(NULL);
    }

    // Make sure keys of waitfor_safe map are journal entry boundaries.
    // The key we used here is either next_safe_pos or old value of
    // next_safe_pos. next_safe_pos is always set to journal entry
    // boundary.
    auto p = pending_safe.rbegin();
    if (p != pending_safe.rend())
      waitfor_safe[p->second].push_back(new C_RetryRead(this));
    else
      waitfor_safe[next_safe_pos].push_back(new C_RetryRead(this));
    return;
  }

  // don't read too much
  if (requested_pos + len > safe_pos) {
    len = safe_pos - requested_pos;
    ldout(cct, 10) << "_issue_read reading only up to safe_pos " << safe_pos
		   << dendl;
  }

  // go.
  ldout(cct, 10) << "_issue_read reading " << requested_pos << "~" << len
		 << ", read pointers read_pos=" << read_pos << " received_pos=" << received_pos
		 << " requested_pos+len=" << (requested_pos+len) << dendl;

  // step by period (object).  _don't_ do a single big filer.read()
  // here because it will wait for all object reads to complete before
  // giving us back any data.  this way we can process whatever bits
  // come in that are contiguous.
  uint64_t period = get_layout_period();
  while (len > 0) {
    // end of the period containing requested_pos
    uint64_t e = requested_pos + period;
    e -= e % period;
    uint64_t l = e - requested_pos;
    if (l > len)
      l = len;
    C_Read *c = new C_Read(this, requested_pos, l);
    filer.read(ino, &layout, CEPH_NOSNAP, requested_pos, l, &c->bl, 0,
	       wrap_finisher(c), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
    requested_pos += l;
    len -= l;
  }
}
1055
// Read ahead from read_pos: compute the prefetch target (fetch_len, or
// a one-shot temp_fetch_len when a single entry is larger), round it
// up to a period boundary, cap it at write_pos, and issue reads for
// whatever has not been requested yet.
void Journaler::_prefetch()
{
  if (is_stopping())
    return;

  ldout(cct, 10) << "_prefetch" << dendl;
  // prefetch
  uint64_t pf;
  if (temp_fetch_len) {
    // one-shot enlarged window set by _is_readable() for an oversized entry
    ldout(cct, 10) << "_prefetch temp_fetch_len " << temp_fetch_len << dendl;
    pf = temp_fetch_len;
    temp_fetch_len = 0;
  } else {
    pf = fetch_len;
  }

  uint64_t raw_target = read_pos + pf;

  // read full log segments, so increase if necessary
  uint64_t period = get_layout_period();
  uint64_t remainder = raw_target % period;
  uint64_t adjustment = remainder ? period - remainder : 0;
  uint64_t target = raw_target + adjustment;

  // don't read past the log tail
  if (target > write_pos)
    target = write_pos;

  if (requested_pos < target) {
    uint64_t len = target - requested_pos;
    ldout(cct, 10) << "_prefetch " << pf << " requested_pos " << requested_pos
		   << " < target " << target << " (" << raw_target
		   << "), prefetching " << len << dendl;

    if (pending_safe.empty() && write_pos > safe_pos) {
      // If we are reading and writing the journal, then we may need
      // to issue a flush if one isn't already in progress.
      // Avoid doing a flush every time so that if we do write/read/write/read
      // we don't end up flushing after every write.
      ldout(cct, 10) << "_prefetch: requested_pos=" << requested_pos
		     << ", read_pos=" << read_pos
		     << ", write_pos=" << write_pos
		     << ", safe_pos=" << safe_pos << dendl;
      _do_flush();
    }

    _issue_read(len);
  }
}
1105
1106
1107 /*
1108 * _is_readable() - return true if next entry is ready.
1109 */
/*
 * _is_readable() - return true if next entry is ready.
 *
 * Side effects: if the stream ends with a partial entry that can never
 * complete (received_pos == write_pos), truncates the in-memory view —
 * all write pointers snap back to read_pos and read state is reset.
 * If the next entry is larger than the normal prefetch window, records
 * the needed size in temp_fetch_len for the next _prefetch().
 */
bool Journaler::_is_readable()
{
  // anything to read?
  if (read_pos == write_pos)
    return false;

  // Check if the retrieve bytestream has enough for an entry
  uint64_t need;
  if (journal_stream.readable(read_buf, &need)) {
    return true;
  }

  ldout (cct, 10) << "_is_readable read_buf.length() == " << read_buf.length()
		  << ", but need " << need << " for next entry; fetch_len is "
		  << fetch_len << dendl;

  // partial fragment at the end?
  if (received_pos == write_pos) {
    ldout(cct, 10) << "is_readable() detected partial entry at tail, "
      "adjusting write_pos to " << read_pos << dendl;

    // adjust write_pos
    prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = read_pos;
    ceph_assert(write_buf.length() == 0);
    ceph_assert(waitfor_safe.empty());

    // reset read state
    requested_pos = received_pos = read_pos;
    read_buf.clear();

    // FIXME: truncate on disk?

    return false;
  }

  if (need > fetch_len) {
    // entry is bigger than the normal window; widen the next prefetch
    temp_fetch_len = need;
    ldout(cct, 10) << "_is_readable noting temp_fetch_len " << temp_fetch_len
		   << dendl;
  }

  ldout(cct, 10) << "_is_readable: not readable, returning false" << dendl;
  return false;
}
1154
1155 /*
1156 * is_readable() - kickstart prefetch, too
1157 */
1158 bool Journaler::is_readable()
1159 {
1160 lock_guard l(lock);
1161
1162 if (error != 0) {
1163 return false;
1164 }
1165
1166 bool r = readable;
1167 _prefetch();
1168 return r;
1169 }
1170
1171 class Journaler::C_EraseFinish : public Context {
1172 Journaler *journaler;
1173 C_OnFinisher *completion;
1174 public:
1175 C_EraseFinish(Journaler *j, C_OnFinisher *c) : journaler(j), completion(c) {}
1176 void finish(int r) override {
1177 journaler->_finish_erase(r, completion);
1178 }
1179 };
1180
1181 /**
1182 * Entirely erase the journal, including header. For use when you
1183 * have already made a copy of the journal somewhere else.
1184 */
1185 void Journaler::erase(Context *completion)
1186 {
1187 lock_guard l(lock);
1188
1189 // Async delete the journal data
1190 uint64_t first = trimmed_pos / get_layout_period();
1191 uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2;
1192 filer.purge_range(ino, &layout, SnapContext(), first, num,
1193 ceph::real_clock::now(), 0,
1194 wrap_finisher(new C_EraseFinish(
1195 this, wrap_finisher(completion))));
1196
1197 // We will not start the operation to delete the header until
1198 // _finish_erase has seen the data deletion succeed: otherwise if
1199 // there was an error deleting data we might prematurely delete the
1200 // header thereby lose our reference to the data.
1201 }
1202
1203 void Journaler::_finish_erase(int data_result, C_OnFinisher *completion)
1204 {
1205 lock_guard l(lock);
1206 if (is_stopping()) {
1207 completion->complete(-EAGAIN);
1208 return;
1209 }
1210
1211 if (data_result == 0) {
1212 // Async delete the journal header
1213 filer.purge_range(ino, &layout, SnapContext(), 0, 1,
1214 ceph::real_clock::now(),
1215 0, wrap_finisher(completion));
1216 } else {
1217 lderr(cct) << "Failed to delete journal " << ino << " data: "
1218 << cpp_strerror(data_result) << dendl;
1219 completion->complete(data_result);
1220 }
1221 }
1222
/* try_read_entry(bl)
 * read entry into bl if it's ready.
 * otherwise, do nothing.
 *
 * Returns true and advances read_pos past the consumed envelope on
 * success.  Returns false if not readable, or sets error = -EINVAL and
 * returns false on a decode failure (corrupt journal stream).
 */
bool Journaler::try_read_entry(bufferlist& bl)
{
  lock_guard l(lock);

  if (!readable) {
    ldout(cct, 10) << "try_read_entry at " << read_pos << " not readable"
		   << dendl;
    return false;
  }

  uint64_t start_ptr;
  size_t consumed;
  try {
    // Splice the next entry payload out of read_buf; consumed includes
    // the envelope (sentinel/size/start_ptr), not just the payload.
    consumed = journal_stream.read(read_buf, &bl, &start_ptr);
    if (stream_format >= JOURNAL_FORMAT_RESILIENT) {
      // Resilient format embeds the entry's own offset; it must match
      // where we think we are reading.
      ceph_assert(start_ptr == read_pos);
    }
  } catch (const buffer::error &e) {
    lderr(cct) << __func__ << ": decode error from journal_stream" << dendl;
    error = -EINVAL;
    return false;
  }

  ldout(cct, 10) << "try_read_entry at " << read_pos << " read "
		 << read_pos << "~" << consumed << " (have "
		 << read_buf.length() << ")" << dendl;

  read_pos += consumed;
  try {
    // We were readable, we might not be any more
    readable = _is_readable();
  } catch (const buffer::error &e) {
    lderr(cct) << __func__ << ": decode error from _is_readable" << dendl;
    error = -EINVAL;
    return false;
  }

  // prefetch?
  _prefetch();

  // If bufferlist consists of discontiguous memory, decoding types whose
  // denc_traits needs contiguous memory is inefficient. The bufferlist may
  // get copied to temporary memory multiple times (copy_shallow() in
  // src/include/denc.h actually does deep copy)
  if (bl.get_num_buffers() > 1)
    bl.rebuild();
  return true;
}
1275
1276 void Journaler::wait_for_readable(Context *onreadable)
1277 {
1278 lock_guard l(lock);
1279 if (is_stopping()) {
1280 finisher->queue(onreadable, -EAGAIN);
1281 return;
1282 }
1283
1284 ceph_assert(on_readable == 0);
1285 if (!readable) {
1286 ldout(cct, 10) << "wait_for_readable at " << read_pos << " onreadable "
1287 << onreadable << dendl;
1288 on_readable = wrap_finisher(onreadable);
1289 } else {
1290 // race with OSD reply
1291 finisher->queue(onreadable, 0);
1292 }
1293 }
1294
1295 bool Journaler::have_waiter() const
1296 {
1297 return on_readable != nullptr;
1298 }
1299
1300
1301
1302
1303 /***************** TRIMMING *******************/
1304
1305
1306 class Journaler::C_Trim : public Context {
1307 Journaler *ls;
1308 uint64_t to;
1309 public:
1310 C_Trim(Journaler *l, int64_t t) : ls(l), to(t) {}
1311 void finish(int r) override {
1312 ls->_finish_trim(r, to);
1313 }
1314 };
1315
1316 void Journaler::trim()
1317 {
1318 lock_guard l(lock);
1319 _trim();
1320 }
1321
// Delete journal objects up to the last committed expire_pos, rounded
// down to a full object (period) boundary.  Caller must hold the lock.
// No-op if there is nothing new to trim or a trim is already in flight;
// otherwise advances trimming_pos and issues an async purge whose
// completion (C_Trim) advances trimmed_pos.
void Journaler::_trim()
{
  if (is_stopping())
    return;

  ceph_assert(!readonly);
  uint64_t period = get_layout_period();
  // Only whole objects can be deleted: round expire_pos down.
  uint64_t trim_to = last_committed.expire_pos;
  trim_to -= trim_to % period;
  ldout(cct, 10) << "trim last_commited head was " << last_committed
	   << ", can trim to " << trim_to
	   << dendl;
  if (trim_to == 0 || trim_to == trimming_pos) {
    ldout(cct, 10) << "trim already trimmed/trimming to "
		   << trimmed_pos << "/" << trimming_pos << dendl;
    return;
  }

  // trimming_pos > trimmed_pos means a previous purge is still pending.
  if (trimming_pos > trimmed_pos) {
    ldout(cct, 10) << "trim already trimming atm, try again later.  "
      "trimmed/trimming is " << trimmed_pos << "/" << trimming_pos << dendl;
    return;
  }

  // trim
  ceph_assert(trim_to <= write_pos);
  ceph_assert(trim_to <= expire_pos);
  ceph_assert(trim_to > trimming_pos);
  ldout(cct, 10) << "trim trimming to " << trim_to
		 << ", trimmed/trimming/expire are "
		 << trimmed_pos << "/" << trimming_pos << "/" << expire_pos
		 << dendl;

  // delete range of objects
  uint64_t first = trimming_pos / period;
  uint64_t num = (trim_to - trimming_pos) / period;
  SnapContext snapc;
  filer.purge_range(ino, &layout, snapc, first, num,
		    ceph::real_clock::now(), 0,
		    wrap_finisher(new C_Trim(this, trim_to)));
  // Mark the range as in-flight; trimmed_pos catches up in _finish_trim.
  trimming_pos = trim_to;
}
1364
// Completion of a trim's purge_range: on success (or -ENOENT, which
// just means the objects were already gone) advance trimmed_pos to the
// target; on any other error route through the write-error handler.
void Journaler::_finish_trim(int r, uint64_t to)
{
  lock_guard l(lock);

  ceph_assert(!readonly);
  // NOTE: logged before the assignment below, so "trimmed ... now" shows
  // the value we are about to commit (to), not the old trimmed_pos.
  ldout(cct, 10) << "_finish_trim trimmed_pos was " << trimmed_pos
	   << ", trimmed/trimming/expire now "
	   << to << "/" << trimming_pos << "/" << expire_pos
	   << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "_finish_trim got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  // Redundant with the guard above; kept as documentation of the invariant.
  ceph_assert(r >= 0 || r == -ENOENT);

  ceph_assert(to <= trimming_pos);
  ceph_assert(to > trimmed_pos);
  trimmed_pos = to;
}
1386
1387 void Journaler::handle_write_error(int r)
1388 {
1389 // lock is locked
1390
1391 lderr(cct) << "handle_write_error " << cpp_strerror(r) << dendl;
1392 if (on_write_error) {
1393 on_write_error->complete(r);
1394 on_write_error = NULL;
1395 called_write_error = true;
1396 } else if (called_write_error) {
1397 /* We don't call error handler more than once, subsequent errors
1398 * are dropped -- this is okay as long as the error handler does
1399 * something dramatic like respawn */
1400 lderr(cct) << __func__ << ": multiple write errors, handler already called"
1401 << dendl;
1402 } else {
1403 ceph_abort_msg("unhandled write error");
1404 }
1405 }
1406
1407
1408 /**
1409 * Test whether the 'read_buf' byte stream has enough data to read
1410 * an entry
1411 *
1412 * sets 'next_envelope_size' to the number of bytes needed to advance (enough
1413 * to get the next header if header was unavailable, or enough to get the whole
1414 * next entry if the header was available but the body wasn't).
1415 */
1416 bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const
1417 {
1418 ceph_assert(need != NULL);
1419
1420 uint32_t entry_size = 0;
1421 uint64_t entry_sentinel = 0;
1422 auto p = read_buf.cbegin();
1423
1424 // Do we have enough data to decode an entry prefix?
1425 if (format >= JOURNAL_FORMAT_RESILIENT) {
1426 *need = sizeof(entry_size) + sizeof(entry_sentinel);
1427 } else {
1428 *need = sizeof(entry_size);
1429 }
1430 if (read_buf.length() >= *need) {
1431 if (format >= JOURNAL_FORMAT_RESILIENT) {
1432 decode(entry_sentinel, p);
1433 if (entry_sentinel != sentinel) {
1434 throw buffer::malformed_input("Invalid sentinel");
1435 }
1436 }
1437
1438 decode(entry_size, p);
1439 } else {
1440 return false;
1441 }
1442
1443 // Do we have enough data to decode an entry prefix, payload and suffix?
1444 if (format >= JOURNAL_FORMAT_RESILIENT) {
1445 *need = JOURNAL_ENVELOPE_RESILIENT + entry_size;
1446 } else {
1447 *need = JOURNAL_ENVELOPE_LEGACY + entry_size;
1448 }
1449 if (read_buf.length() >= *need) {
1450 return true; // No more bytes needed
1451 }
1452
1453 return false;
1454 }
1455
1456
/**
 * Consume one entry from a journal byte stream 'from', splicing a
 * serialized LogEvent blob into 'entry'.
 *
 * 'entry' must be non null and point to an empty bufferlist.
 *
 * 'from' must contain sufficient valid data (i.e. readable is true).
 *
 * 'start_ptr' will be set to the entry's start pointer, if the collection
 * format provides it. It may not be null.
 *
 * @returns The number of bytes consumed from the `from` byte stream. Note
 * that this is not equal to the length of `entry`, which contains
 * the inner serialized LogEvent and not the envelope.
 */
size_t JournalStream::read(bufferlist &from, bufferlist *entry,
			   uint64_t *start_ptr)
{
  ceph_assert(start_ptr != NULL);
  ceph_assert(entry != NULL);
  ceph_assert(entry->length() == 0);

  uint32_t entry_size = 0;

  // Consume envelope prefix: entry_size and entry_sentinel
  // (decode order matters: resilient layout is [sentinel][size][payload][start_ptr])
  auto from_ptr = from.cbegin();
  if (format >= JOURNAL_FORMAT_RESILIENT) {
    uint64_t entry_sentinel = 0;
    decode(entry_sentinel, from_ptr);
    // Assertion instead of clean check because of precondition of this
    // fn is that readable() already passed
    ceph_assert(entry_sentinel == sentinel);
  }
  decode(entry_size, from_ptr);

  // Read out the payload
  from_ptr.copy(entry_size, *entry);

  // Consume the envelope suffix (start_ptr)
  if (format >= JOURNAL_FORMAT_RESILIENT) {
    decode(*start_ptr, from_ptr);
  } else {
    // Legacy format carries no start pointer; report 0.
    *start_ptr = 0;
  }

  // Trim the input buffer to discard the bytes we have consumed
  from.splice(0, from_ptr.get_off());

  return from_ptr.get_off();
}
1507
1508
1509 /**
1510 * Append one entry
1511 */
1512 size_t JournalStream::write(bufferlist &entry, bufferlist *to,
1513 uint64_t const &start_ptr)
1514 {
1515 ceph_assert(to != NULL);
1516
1517 uint32_t const entry_size = entry.length();
1518 if (format >= JOURNAL_FORMAT_RESILIENT) {
1519 encode(sentinel, *to);
1520 }
1521 encode(entry_size, *to);
1522 to->claim_append(entry);
1523 if (format >= JOURNAL_FORMAT_RESILIENT) {
1524 encode(start_ptr, *to);
1525 }
1526
1527 if (format >= JOURNAL_FORMAT_RESILIENT) {
1528 return JOURNAL_ENVELOPE_RESILIENT + entry_size;
1529 } else {
1530 return JOURNAL_ENVELOPE_LEGACY + entry_size;
1531 }
1532 }
1533
1534 /**
1535 * set write error callback
1536 *
1537 * Set a callback/context to trigger if we get a write error from
1538 * the objecter. This may be from an explicit request (e.g., flush)
1539 * or something async the journaler did on its own (e.g., journal
1540 * header update).
1541 *
1542 * It is only used once; if the caller continues to use the
1543 * Journaler and wants to hear about errors, it needs to reset the
1544 * error_handler.
1545 *
1546 * @param c callback/context to trigger on error
1547 */
1548 void Journaler::set_write_error_handler(Context *c) {
1549 lock_guard l(lock);
1550 ceph_assert(!on_write_error);
1551 on_write_error = wrap_finisher(c);
1552 called_write_error = false;
1553 }
1554
1555
1556 /**
1557 * Wrap a context in a C_OnFinisher, if it is non-NULL
1558 *
1559 * Utility function to avoid lots of error-prone and verbose
1560 * NULL checking on contexts passed in.
1561 */
1562 C_OnFinisher *Journaler::wrap_finisher(Context *c)
1563 {
1564 if (c != NULL) {
1565 return new C_OnFinisher(c, finisher);
1566 } else {
1567 return NULL;
1568 }
1569 }
1570
1571 void Journaler::shutdown()
1572 {
1573 lock_guard l(lock);
1574
1575 ldout(cct, 1) << __func__ << dendl;
1576
1577 state = STATE_STOPPING;
1578 readable = false;
1579
1580 // Kick out anyone reading from journal
1581 error = -EAGAIN;
1582 if (on_readable) {
1583 C_OnFinisher *f = on_readable;
1584 on_readable = 0;
1585 f->complete(-EAGAIN);
1586 }
1587
1588 list<Context*> ls;
1589 ls.swap(waitfor_recover);
1590 finish_contexts(cct, ls, -ESHUTDOWN);
1591
1592 std::map<uint64_t, std::list<Context*> >::iterator i;
1593 for (i = waitfor_safe.begin(); i != waitfor_safe.end(); ++i) {
1594 finish_contexts(cct, i->second, -EAGAIN);
1595 }
1596 waitfor_safe.clear();
1597 }
1598