]> git.proxmox.com Git - ceph.git/blob - ceph/src/osdc/Journaler.cc
update sources to 12.2.7
[ceph.git] / ceph / src / osdc / Journaler.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "common/perf_counters.h"
16 #include "common/dout.h"
17 #include "include/Context.h"
18 #include "msg/Messenger.h"
19 #include "osdc/Journaler.h"
20 #include "common/errno.h"
21 #include "include/assert.h"
22 #include "common/Finisher.h"
23
24 #define dout_subsys ceph_subsys_journaler
25 #undef dout_prefix
26 #define dout_prefix *_dout << objecter->messenger->get_myname() \
27 << ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
28
29 using std::chrono::seconds;
30
31
32 class Journaler::C_DelayFlush : public Context {
33 Journaler *journaler;
34 public:
35 C_DelayFlush(Journaler *j) : journaler(j) {}
36 void finish(int r) override {
37 journaler->_do_delayed_flush();
38 }
39 };
40
41 void Journaler::set_readonly()
42 {
43 lock_guard l(lock);
44
45 ldout(cct, 1) << "set_readonly" << dendl;
46 readonly = true;
47 }
48
49 void Journaler::set_writeable()
50 {
51 lock_guard l(lock);
52
53 ldout(cct, 1) << "set_writeable" << dendl;
54 readonly = false;
55 }
56
/*
 * Initialize a brand-new, empty journal.
 *
 * Adopts the given layout and stream format, then places every
 * position pointer (write/flush/safe/read/expire/trim, etc.) at the
 * end of the first period — i.e. just past the object that holds the
 * header.  Caller-visible state ends up STATE_ACTIVE.
 */
void Journaler::create(file_layout_t *l, stream_format_t const sf)
{
  lock_guard lk(lock);

  assert(!readonly);
  state = STATE_ACTIVE;

  stream_format = sf;
  journal_stream.set_format(sf);
  _set_layout(l);

  // all pointers start together, one full period in
  prezeroing_pos = prezero_pos = write_pos = flush_pos =
    safe_pos = read_pos = requested_pos = received_pos =
    expire_pos = trimming_pos = trimmed_pos =
    next_safe_pos = layout.get_period();

  ldout(cct, 1) << "created blank journal at inode 0x" << std::hex << ino
		<< std::dec << ", format=" << stream_format << dendl;
}
76
77 void Journaler::set_layout(file_layout_t const *l)
78 {
79 lock_guard lk(lock);
80 _set_layout(l);
81 }
82
/*
 * Adopt a new file layout and recompute the prefetch window.
 *
 * Aborts the process if the layout's pool does not match the pool this
 * Journaler was constructed for — the header on disk can carry a stale
 * pool id (e.g. rewritten by an external tool), and continuing with a
 * mismatched pool would misdirect all journal I/O.
 */
void Journaler::_set_layout(file_layout_t const *l)
{
  layout = *l;

  if (layout.pool_id != pg_pool) {
    // user can reset pool id through cephfs-journal-tool
    lderr(cct) << "may got older pool id from header layout" << dendl;
    ceph_abort();
  }
  last_written.layout = layout;
  last_committed.layout = layout;

  // prefetch intelligently.
  // (watch out, this is big if you use big objects or weird striping)
  uint64_t periods = cct->_conf->journaler_prefetch_periods;
  if (periods < 2)
    periods = 2; // we need at least 2 periods to make progress.
  fetch_len = layout.get_period() * periods;
}
102
103
104 /***************** HEADER *******************/
105
106 ostream& operator<<(ostream &out, const Journaler::Header &h)
107 {
108 return out << "loghead(trim " << h.trimmed_pos
109 << ", expire " << h.expire_pos
110 << ", write " << h.write_pos
111 << ", stream_format " << (int)(h.stream_format)
112 << ")";
113 }
114
115 class Journaler::C_ReadHead : public Context {
116 Journaler *ls;
117 public:
118 bufferlist bl;
119 explicit C_ReadHead(Journaler *l) : ls(l) {}
120 void finish(int r) override {
121 ls->_finish_read_head(r, bl);
122 }
123 };
124
125 class Journaler::C_RereadHead : public Context {
126 Journaler *ls;
127 Context *onfinish;
128 public:
129 bufferlist bl;
130 C_RereadHead(Journaler *l, Context *onfinish_) : ls (l),
131 onfinish(onfinish_) {}
132 void finish(int r) override {
133 ls->_finish_reread_head(r, bl, onfinish);
134 }
135 };
136
137 class Journaler::C_ProbeEnd : public Context {
138 Journaler *ls;
139 public:
140 uint64_t end;
141 explicit C_ProbeEnd(Journaler *l) : ls(l), end(-1) {}
142 void finish(int r) override {
143 ls->_finish_probe_end(r, end);
144 }
145 };
146
147 class Journaler::C_ReProbe : public Context {
148 Journaler *ls;
149 C_OnFinisher *onfinish;
150 public:
151 uint64_t end;
152 C_ReProbe(Journaler *l, C_OnFinisher *onfinish_) :
153 ls(l), onfinish(onfinish_), end(0) {}
154 void finish(int r) override {
155 ls->_finish_reprobe(r, end, onfinish);
156 }
157 };
158
159 void Journaler::recover(Context *onread)
160 {
161 lock_guard l(lock);
162 if (is_stopping()) {
163 onread->complete(-EAGAIN);
164 return;
165 }
166
167 ldout(cct, 1) << "recover start" << dendl;
168 assert(state != STATE_ACTIVE);
169 assert(readonly);
170
171 if (onread)
172 waitfor_recover.push_back(wrap_finisher(onread));
173
174 if (state != STATE_UNDEF) {
175 ldout(cct, 1) << "recover - already recovering" << dendl;
176 return;
177 }
178
179 ldout(cct, 1) << "read_head" << dendl;
180 state = STATE_READHEAD;
181 C_ReadHead *fin = new C_ReadHead(this);
182 _read_head(fin, &fin->bl);
183 }
184
/*
 * Issue an async full read of the header object (object 0 of the
 * journal inode) into *bl, completing @on_finish when done.  Caller
 * must hold the lock and be in a head-reading state.
 */
void Journaler::_read_head(Context *on_finish, bufferlist *bl)
{
  // lock is locked
  assert(state == STATE_READHEAD || state == STATE_REREADHEAD);

  object_t oid = file_object_t(ino, 0);
  object_locator_t oloc(pg_pool);
  objecter->read_full(oid, oloc, CEPH_NOSNAP, bl, 0, wrap_finisher(on_finish));
}
194
195 void Journaler::reread_head(Context *onfinish)
196 {
197 lock_guard l(lock);
198 _reread_head(wrap_finisher(onfinish));
199 }
200
201 /**
202 * Re-read the head from disk, and set the write_pos, expire_pos, trimmed_pos
203 * from the on-disk header. This switches the state to STATE_REREADHEAD for
204 * the duration, and you shouldn't start a re-read while other operations are
205 * in-flight, nor start other operations while a re-read is in progress.
206 * Also, don't call this until the Journaler has finished its recovery and has
207 * gone STATE_ACTIVE!
208 */
209 void Journaler::_reread_head(Context *onfinish)
210 {
211 ldout(cct, 10) << "reread_head" << dendl;
212 assert(state == STATE_ACTIVE);
213
214 state = STATE_REREADHEAD;
215 C_RereadHead *fin = new C_RereadHead(this, onfinish);
216 _read_head(fin, &fin->bl);
217 }
218
/*
 * Completion of a header re-read: decode the on-disk header, reset the
 * write/expire/trim pointers from it, and return to STATE_ACTIVE.
 * Completes @finish with r, -EINVAL on decode failure, or -EAGAIN when
 * shutting down.
 */
void Journaler::_finish_reread_head(int r, bufferlist& bl, Context *finish)
{
  lock_guard l(lock);
  if (is_stopping()) {
    finish->complete(-EAGAIN);
    return;
  }

  //read on-disk header into
  assert(bl.length() || r < 0 );

  // unpack header
  if (r == 0) {
    Header h;
    bufferlist::iterator p = bl.begin();
    try {
      ::decode(h, p);
    } catch (const buffer::error &e) {
      finish->complete(-EINVAL);
      return;
    }
    // collapse every write-side pointer onto the recorded write_pos
    prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
      = h.write_pos;
    expire_pos = h.expire_pos;
    trimmed_pos = trimming_pos = h.trimmed_pos;
    init_headers(h);
    state = STATE_ACTIVE;
  }

  finish->complete(r);
}
250
/*
 * Completion of the initial header read during recovery.
 *
 * Three outcomes:
 *  - read error: fail all recovery waiters with the error;
 *  - zero-length header object: treat as a fresh/empty journal and go
 *    straight to STATE_ACTIVE;
 *  - decoded header: validate magic and offset ordering, adopt the
 *    header's pointers/layout/format, then probe for the real log tail
 *    (the header's write_pos is only a lower bound).
 */
void Journaler::_finish_read_head(int r, bufferlist& bl)
{
  lock_guard l(lock);
  if (is_stopping())
    return;

  assert(state == STATE_READHEAD);

  if (r!=0) {
    ldout(cct, 0) << "error getting journal off disk" << dendl;
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, r);
    return;
  }

  if (bl.length() == 0) {
    ldout(cct, 1) << "_finish_read_head r=" << r
		  << " read 0 bytes, assuming empty log" << dendl;
    state = STATE_ACTIVE;
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, 0);
    return;
  }

  // unpack header
  bool corrupt = false;
  Header h;
  bufferlist::iterator p = bl.begin();
  try {
    ::decode(h, p);

    if (h.magic != magic) {
      ldout(cct, 0) << "on disk magic '" << h.magic << "' != my magic '"
		    << magic << "'" << dendl;
      corrupt = true;
    } else if (h.write_pos < h.expire_pos || h.expire_pos < h.trimmed_pos) {
      // invariant: trimmed_pos <= expire_pos <= write_pos
      ldout(cct, 0) << "Corrupt header (bad offsets): " << h << dendl;
      corrupt = true;
    }
  } catch (const buffer::error &e) {
    corrupt = true;
  }

  if (corrupt) {
    list<Context*> ls;
    ls.swap(waitfor_recover);
    finish_contexts(cct, ls, -EINVAL);
    return;
  }

  // adopt the header's view of the journal
  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
    = h.write_pos;
  read_pos = requested_pos = received_pos = expire_pos = h.expire_pos;
  trimmed_pos = trimming_pos = h.trimmed_pos;

  init_headers(h);
  _set_layout(&h.layout);
  stream_format = h.stream_format;
  journal_stream.set_format(h.stream_format);

  ldout(cct, 1) << "_finish_read_head " << h
		<< ". probing for end of log (from " << write_pos << ")..."
		<< dendl;
  C_ProbeEnd *fin = new C_ProbeEnd(this);
  state = STATE_PROBING;
  _probe(fin, &fin->end);
}
320
/*
 * Async probe for the true end of the log, scanning forward from
 * write_pos; the discovered offset is stored into *end before @finish
 * runs.  Caller holds the lock.
 */
void Journaler::_probe(Context *finish, uint64_t *end)
{
  // lock is locked
  ldout(cct, 1) << "probing for end of the log" << dendl;
  assert(state == STATE_PROBING || state == STATE_REPROBING);
  // probe the log
  filer.probe(ino, &layout, CEPH_NOSNAP,
	      write_pos, end, true, 0, wrap_finisher(finish));
}
330
/*
 * Re-probe the log tail while ACTIVE (used after a header re-read, when
 * another writer may have appended).  Switches to STATE_REPROBING until
 * _finish_reprobe() completes.
 */
void Journaler::_reprobe(C_OnFinisher *finish)
{
  ldout(cct, 10) << "reprobe" << dendl;
  assert(state == STATE_ACTIVE);

  state = STATE_REPROBING;
  C_ReProbe *fin = new C_ReProbe(this, finish);
  _probe(fin, &fin->end);
}
340
341
/*
 * Completion of a re-probe: adopt the discovered tail as the new write
 * position (all write-side pointers move together) and return to
 * STATE_ACTIVE.  @onfinish is completed with the probe's result code.
 */
void Journaler::_finish_reprobe(int r, uint64_t new_end,
				C_OnFinisher *onfinish)
{
  lock_guard l(lock);
  if (is_stopping()) {
    onfinish->complete(-EAGAIN);
    return;
  }

  // on success the tail can only be at or past what we already knew
  assert(new_end >= write_pos || r < 0);
  ldout(cct, 1) << "_finish_reprobe new_end = " << new_end
		<< " (header had " << write_pos << ")."
		<< dendl;
  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = new_end;
  state = STATE_ACTIVE;
  onfinish->complete(r);
}
359
/*
 * Completion of the recovery-time tail probe.  On success, advance all
 * write-side pointers to the discovered end and go STATE_ACTIVE; on a
 * probe error, skip straight to completing the recovery waiters with
 * the error code.
 */
void Journaler::_finish_probe_end(int r, uint64_t end)
{
  lock_guard l(lock);
  if (is_stopping())
    return;

  assert(state == STATE_PROBING);
  if (r < 0) { // error in probing
    goto out;
  }
  if (((int64_t)end) == -1) {
    // probe found no data past write_pos, yet a non-empty header said
    // there should be some — treat as fatal
    end = write_pos;
    ldout(cct, 1) << "_finish_probe_end write_pos = " << end << " (header had "
		  << write_pos << "). log was empty. recovered." << dendl;
    ceph_abort(); // hrm.
  } else {
    assert(end >= write_pos);
    ldout(cct, 1) << "_finish_probe_end write_pos = " << end
		  << " (header had " << write_pos << "). recovered."
		  << dendl;
  }

  state = STATE_ACTIVE;

  prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = end;

 out:
  // done.
  list<Context*> ls;
  ls.swap(waitfor_recover);
  finish_contexts(cct, ls, r);
}
392
393 class Journaler::C_RereadHeadProbe : public Context
394 {
395 Journaler *ls;
396 C_OnFinisher *final_finish;
397 public:
398 C_RereadHeadProbe(Journaler *l, C_OnFinisher *finish) :
399 ls(l), final_finish(finish) {}
400 void finish(int r) override {
401 ls->_finish_reread_head_and_probe(r, final_finish);
402 }
403 };
404
405 void Journaler::reread_head_and_probe(Context *onfinish)
406 {
407 lock_guard l(lock);
408
409 assert(state == STATE_ACTIVE);
410 _reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish)));
411 }
412
/*
 * Second stage of reread_head_and_probe(): after the header re-read
 * completes, kick off the re-probe.  Called back from
 * _finish_reread_head, so the lock is already held.
 */
void Journaler::_finish_reread_head_and_probe(int r, C_OnFinisher *onfinish)
{
  // Expect to be called back from finish_reread_head, which already takes lock
  // lock is locked
  if (is_stopping()) {
    onfinish->complete(-EAGAIN);
    return;
  }

  assert(!r); //if we get an error, we're boned
  _reprobe(onfinish);
}
425
426
427 // WRITING
428
429 class Journaler::C_WriteHead : public Context {
430 public:
431 Journaler *ls;
432 Header h;
433 C_OnFinisher *oncommit;
434 C_WriteHead(Journaler *l, Header& h_, C_OnFinisher *c) : ls(l), h(h_),
435 oncommit(c) {}
436 void finish(int r) override {
437 ls->_finish_write_head(r, h, oncommit);
438 }
439 };
440
441 void Journaler::write_head(Context *oncommit)
442 {
443 lock_guard l(lock);
444 _write_head(oncommit);
445 }
446
447
/*
 * Persist the in-memory head to the header object (object 0).
 *
 * The header's write_pos is recorded as safe_pos — only data known to
 * be durable may be advertised.  @oncommit (may be NULL) fires once the
 * header write itself is durable.  Caller holds the lock.
 */
void Journaler::_write_head(Context *oncommit)
{
  assert(!readonly);
  assert(state == STATE_ACTIVE);
  last_written.trimmed_pos = trimmed_pos;
  last_written.expire_pos = expire_pos;
  last_written.unused_field = expire_pos;
  last_written.write_pos = safe_pos;
  last_written.stream_format = stream_format;
  ldout(cct, 10) << "write_head " << last_written << dendl;

  // Avoid persisting bad pointers in case of bugs
  assert(last_written.write_pos >= last_written.expire_pos);
  assert(last_written.expire_pos >= last_written.trimmed_pos);

  last_wrote_head = ceph::real_clock::now();

  bufferlist bl;
  ::encode(last_written, bl);
  SnapContext snapc;

  object_t oid = file_object_t(ino, 0);
  object_locator_t oloc(pg_pool);
  objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0,
		       wrap_finisher(new C_WriteHead(
				       this, last_written,
				       wrap_finisher(oncommit))),
		       0, 0, write_iohint);
}
477
/*
 * Completion of a header write: on success record the written header as
 * last_committed, complete @oncommit (if any), and check whether more of
 * the journal can now be trimmed.  Write errors are routed to the
 * registered write-error handler.
 */
void Journaler::_finish_write_head(int r, Header &wrote,
				   C_OnFinisher *oncommit)
{
  lock_guard l(lock);

  if (r < 0) {
    lderr(cct) << "_finish_write_head got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }
  assert(!readonly);
  ldout(cct, 10) << "_finish_write_head " << wrote << dendl;
  last_committed = wrote;
  if (oncommit) {
    oncommit->complete(r);
  }

  _trim(); // trim?
}
497
498
499 /***************** WRITING *******************/
500
501 class Journaler::C_Flush : public Context {
502 Journaler *ls;
503 uint64_t start;
504 ceph::real_time stamp;
505 public:
506 C_Flush(Journaler *l, int64_t s, ceph::real_time st)
507 : ls(l), start(s), stamp(st) {}
508 void finish(int r) override {
509 ls->_finish_flush(r, start, stamp);
510 }
511 };
512
/*
 * Completion of a journal data write that began at offset @start.
 *
 * Removes the flush from pending_safe and advances safe_pos: to
 * next_safe_pos once nothing is in flight, otherwise to the entry
 * boundary recorded for the oldest still-pending flush.  Finally fires
 * every waiter queued at or below the new safe_pos.  Write errors go to
 * the registered write-error handler.
 */
void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp)
{
  lock_guard l(lock);
  assert(!readonly);

  if (r < 0) {
    lderr(cct) << "_finish_flush got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  assert(start < flush_pos);

  // calc latency?
  if (logger) {
    ceph::timespan lat = ceph::real_clock::now() - stamp;
    logger->tinc(logger_key_lat, lat);
  }

  // adjust safe_pos
  auto it = pending_safe.find(start);
  assert(it != pending_safe.end());
  pending_safe.erase(it);
  if (pending_safe.empty())
    safe_pos = next_safe_pos;
  else
    safe_pos = pending_safe.begin()->second;

  ldout(cct, 10) << "_finish_flush safe from " << start
		 << ", pending_safe " << pending_safe
		 << ", (prezeroing/prezero)/write/flush/safe positions now "
		 << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
		 << write_pos << "/" << flush_pos << "/" << safe_pos
		 << dendl;

  // kick waiters <= safe_pos
  if (!waitfor_safe.empty()) {
    list<Context*> ls;
    while (!waitfor_safe.empty()) {
      auto it = waitfor_safe.begin();
      if (it->first > safe_pos)
	break;
      ls.splice(ls.end(), it->second);
      waitfor_safe.erase(it);
    }
    finish_contexts(cct, ls);
  }
}
561
562
563
/*
 * Append one entry (stream envelope + payload) to the write buffer and
 * return the new write_pos (the end offset of the appended entry).
 *
 * May block on the write-buffer throttle; the journaler lock is dropped
 * while blocking so in-flight flushes can complete and release space.
 * When the entry crosses an object boundary, the completed object(s)
 * are flushed immediately.
 */
uint64_t Journaler::append_entry(bufferlist& bl)
{
  unique_lock l(lock);

  assert(!readonly);
  uint32_t s = bl.length();

  // append
  size_t delta = bl.length() + journal_stream.get_envelope_size();
  // write_buf space is nearly full
  if (!write_buf_throttle.get_or_fail(delta)) {
    // block for throttle space without holding the journaler lock
    l.unlock();
    ldout(cct, 10) << "write_buf_throttle wait, delta " << delta << dendl;
    write_buf_throttle.get(delta);
    l.lock();
  }
  ldout(cct, 20) << "write_buf_throttle get, delta " << delta << dendl;
  size_t wrote = journal_stream.write(bl, &write_buf, write_pos);
  ldout(cct, 10) << "append_entry len " << s << " to " << write_pos << "~"
		 << wrote << dendl;
  write_pos += wrote;

  // flush previous object?
  uint64_t su = get_layout_period();
  assert(su > 0);
  uint64_t write_off = write_pos % su;
  uint64_t write_obj = write_pos / su;
  uint64_t flush_obj = flush_pos / su;
  if (write_obj != flush_obj) {
    ldout(cct, 10) << " flushing completed object(s) (su " << su << " wro "
		   << write_obj << " flo " << flush_obj << ")" << dendl;
    _do_flush(write_buf.length() - write_off);

    // if _do_flush() skips flushing some data, it does do a best effort to
    // update next_safe_pos.
    if (write_buf.length() > 0 &&
	write_buf.length() <= wrote) { // the unflushed data are within this entry
      // set next_safe_pos to end of previous entry
      next_safe_pos = write_pos - wrote;
    }
  }

  return write_pos;
}
608
609
/*
 * Submit up to @amount bytes (0 = everything buffered) of journal data
 * to the OSDs.  The flush is clamped so we never write into space that
 * has not been pre-zeroed at least one full period ahead; if no zeroed
 * space is available the flush is deferred (waiting_for_zero_pos) and
 * resumed by _finish_prezero().  Caller holds the lock.  Advances
 * flush_pos, records the flush boundary in pending_safe, and releases
 * throttle space for the flushed bytes.
 */
void Journaler::_do_flush(unsigned amount)
{
  if (is_stopping())
    return;
  if (write_pos == flush_pos)
    return;
  assert(write_pos > flush_pos);
  assert(!readonly);

  // flush
  uint64_t len = write_pos - flush_pos;
  assert(len == write_buf.length());
  if (amount && amount < len)
    len = amount;

  // zero at least two full periods ahead.  this ensures
  // that the next object will not exist.
  uint64_t period = get_layout_period();
  if (flush_pos + len + 2*period > prezero_pos) {
    _issue_prezero();

    int64_t newlen = prezero_pos - flush_pos - period;
    if (newlen <= 0) {
      ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
		     << " already too close to prezero_pos " << prezero_pos
		     << ", zeroing first" << dendl;
      waiting_for_zero_pos = flush_pos + len;
      return;
    }
    if (static_cast<uint64_t>(newlen) < len) {
      ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
		     << " but hit prezero_pos " << prezero_pos
		     << ", will do " << flush_pos << "~" << newlen << dendl;
      waiting_for_zero_pos = flush_pos + len;
      len = newlen;
    }
  }
  ldout(cct, 10) << "_do_flush flushing " << flush_pos << "~" << len << dendl;

  // submit write for anything pending
  // flush _start_ pos to _finish_flush
  ceph::real_time now = ceph::real_clock::now();
  SnapContext snapc;

  Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT
  pending_safe[flush_pos] = next_safe_pos;

  bufferlist write_bl;

  // adjust pointers
  if (len == write_buf.length()) {
    write_bl.swap(write_buf);
    next_safe_pos = write_pos;
  } else {
    write_buf.splice(0, len, &write_bl);
    // Keys of waitfor_safe map are journal entry boundaries.
    // Try finding a journal entry that we are actually flushing
    // and set next_safe_pos to end of it. This is best effort.
    // The one we found may not be the lastest flushing entry.
    auto p = waitfor_safe.lower_bound(flush_pos + len);
    if (p != waitfor_safe.end()) {
      if (p->first > flush_pos + len && p != waitfor_safe.begin())
	--p;
      if (p->first <= flush_pos + len && p->first > next_safe_pos)
	next_safe_pos = p->first;
    }
  }

  filer.write(ino, &layout, snapc,
	      flush_pos, len, write_bl, ceph::real_clock::now(),
	      0,
	      wrap_finisher(onsafe), write_iohint);

  flush_pos += len;
  assert(write_buf.length() == write_pos - flush_pos);
  write_buf_throttle.put(len);
  ldout(cct, 20) << "write_buf_throttle put, len " << len << dendl;

  ldout(cct, 10)
    << "_do_flush (prezeroing/prezero)/write/flush/safe pointers now at "
    << "(" << prezeroing_pos << "/" << prezero_pos << ")/" << write_pos
    << "/" << flush_pos << "/" << safe_pos << dendl;

  _issue_prezero();
}
695
696
697 void Journaler::wait_for_flush(Context *onsafe)
698 {
699 lock_guard l(lock);
700 if (is_stopping()) {
701 onsafe->complete(-EAGAIN);
702 return;
703 }
704 _wait_for_flush(onsafe);
705 }
706
/*
 * Register @onsafe (may be NULL) to fire once all data written so far
 * is safe on disk; queues it immediately on the finisher if everything
 * is already safe.  Caller holds the lock.
 */
void Journaler::_wait_for_flush(Context *onsafe)
{
  assert(!readonly);

  // all flushed and safe?
  if (write_pos == safe_pos) {
    assert(write_buf.length() == 0);
    ldout(cct, 10)
      << "flush nothing to flush, (prezeroing/prezero)/write/flush/safe "
      "pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
      << write_pos << "/" << flush_pos << "/" << safe_pos << dendl;
    if (onsafe) {
      finisher->queue(onsafe, 0);
    }
    return;
  }

  // queue waiter
  if (onsafe) {
    waitfor_safe[write_pos].push_back(wrap_finisher(onsafe));
  }
}
729
730 void Journaler::flush(Context *onsafe)
731 {
732 lock_guard l(lock);
733 if (is_stopping()) {
734 onsafe->complete(-EAGAIN);
735 return;
736 }
737 _flush(wrap_finisher(onsafe));
738 }
739
/*
 * Flush any buffered data and queue @onsafe (may be NULL) to fire once
 * it is safe on disk; completes immediately if nothing is buffered.
 * Also opportunistically rewrites the head when it has grown stale.
 * Caller holds the lock.
 */
void Journaler::_flush(C_OnFinisher *onsafe)
{
  assert(!readonly);

  if (write_pos == flush_pos) {
    assert(write_buf.length() == 0);
    ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/"
      "flush/safe pointers at " << "(" << prezeroing_pos << "/" << prezero_pos
      << ")/" << write_pos << "/" << flush_pos << "/" << safe_pos
      << dendl;
    if (onsafe) {
      onsafe->complete(0);
    }
  } else {
    _do_flush();
    _wait_for_flush(onsafe);
  }

  // write head?
  if (_write_head_needed()) {
    _write_head();
  }
}
763
764 bool Journaler::_write_head_needed()
765 {
766 return last_wrote_head + seconds(cct->_conf->journaler_write_head_interval)
767 < ceph::real_clock::now();
768 }
769
770
771 /*************** prezeroing ******************/
772
773 struct C_Journaler_Prezero : public Context {
774 Journaler *journaler;
775 uint64_t from, len;
776 C_Journaler_Prezero(Journaler *j, uint64_t f, uint64_t l)
777 : journaler(j), from(f), len(l) {}
778 void finish(int r) override {
779 journaler->_finish_prezero(r, from, len);
780 }
781 };
782
/*
 * Issue zero operations ahead of write_pos so that at least
 * journaler_prezero_periods (minimum 2) full periods of zeroed space
 * exist in front of the writer.  Work is emitted in period-aligned
 * chunks; prezeroing_pos tracks what has been issued, prezero_pos (in
 * _finish_prezero) tracks what has completed.  Caller holds the lock.
 */
void Journaler::_issue_prezero()
{
  assert(prezeroing_pos >= flush_pos);

  // we need to zero at least two periods, minimum, to ensure that we
  // have a full empty object/period in front of us.
  uint64_t num_periods = MAX(2, cct->_conf->journaler_prezero_periods);

  /*
   * issue zero requests based on write_pos, even though the invariant
   * is that we zero ahead of flush_pos.
   */
  uint64_t period = get_layout_period();
  uint64_t to = write_pos + period * num_periods + period - 1;
  to -= to % period;   // round down to a period boundary

  if (prezeroing_pos >= to) {
    ldout(cct, 20) << "_issue_prezero target " << to << " <= prezeroing_pos "
		   << prezeroing_pos << dendl;
    return;
  }

  while (prezeroing_pos < to) {
    uint64_t len;
    if (prezeroing_pos % period == 0) {
      len = period;
      ldout(cct, 10) << "_issue_prezero removing " << prezeroing_pos << "~"
		     << period << " (full period)" << dendl;
    } else {
      // partial leading chunk: zero only up to the next period boundary
      len = period - (prezeroing_pos % period);
      ldout(cct, 10) << "_issue_prezero zeroing " << prezeroing_pos << "~"
		     << len << " (partial period)" << dendl;
    }
    SnapContext snapc;
    Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos,
						       len));
    filer.zero(ino, &layout, snapc, prezeroing_pos, len,
	       ceph::real_clock::now(), 0, c);
    prezeroing_pos += len;
  }
}
824
// Lock cycle because we get called out of objecter callback (holding
// objecter read lock), but there are also cases where we take the journaler
// lock before calling into objecter to do I/O.
/*
 * Completion of one zero operation over [start, start+len).
 *
 * Zeroes can complete out of order: an in-order completion advances
 * prezero_pos and then drains any now-contiguous intervals parked in
 * pending_zero; an out-of-order completion is simply recorded in
 * pending_zero.  Also resumes a flush that was deferred waiting for
 * zeroed space, and fires wait_for_prezero() waiters once issued and
 * completed positions coincide.
 */
void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len)
{
  lock_guard l(lock);

  ldout(cct, 10) << "_prezeroed to " << start << "~" << len
		 << ", prezeroing/prezero was " << prezeroing_pos << "/"
		 << prezero_pos << ", pending " << pending_zero
		 << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "_prezeroed got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  assert(r == 0 || r == -ENOENT);

  if (start == prezero_pos) {
    prezero_pos += len;
    // absorb contiguous completions that arrived early
    while (!pending_zero.empty() &&
	   pending_zero.begin().get_start() == prezero_pos) {
      interval_set<uint64_t>::iterator b(pending_zero.begin());
      prezero_pos += b.get_len();
      pending_zero.erase(b);
    }

    if (waiting_for_zero_pos > flush_pos) {
      // a flush was deferred until this space was zeroed; restart it
      _do_flush(waiting_for_zero_pos - flush_pos);
    }

    if (prezero_pos == prezeroing_pos &&
	!waitfor_prezero.empty()) {
      list<Context*> ls;
      ls.swap(waitfor_prezero);
      finish_contexts(cct, ls, 0);
    }
  } else {
    // out-of-order completion; remember it until the gap fills in
    pending_zero.insert(start, len);
  }
  ldout(cct, 10) << "_prezeroed prezeroing/prezero now " << prezeroing_pos
		 << "/" << prezero_pos
		 << ", pending " << pending_zero
		 << dendl;
}
871
872 void Journaler::wait_for_prezero(Context *onfinish)
873 {
874 assert(onfinish);
875 lock_guard l(lock);
876
877 if (prezero_pos == prezeroing_pos) {
878 finisher->queue(onfinish, 0);
879 return;
880 }
881 waitfor_prezero.push_back(wrap_finisher(onfinish));
882 }
883
884
885 /***************** READING *******************/
886
887
888 class Journaler::C_Read : public Context {
889 Journaler *ls;
890 uint64_t offset;
891 uint64_t length;
892 public:
893 bufferlist bl;
894 C_Read(Journaler *j, uint64_t o, uint64_t l) : ls(j), offset(o), length(l) {}
895 void finish(int r) override {
896 ls->_finish_read(r, offset, length, bl);
897 }
898 };
899
900 class Journaler::C_RetryRead : public Context {
901 Journaler *ls;
902 public:
903 explicit C_RetryRead(Journaler *l) : ls(l) {}
904
905 void finish(int r) override {
906 // Should only be called from waitfor_safe i.e. already inside lock
907 // (ls->lock is locked
908 ls->_prefetch();
909 }
910 };
911
/*
 * Completion of one prefetch read at @offset.
 *
 * On error — including a short read — latch `error` and notify the
 * on_readable waiter.  Otherwise stash the buffer in prefetch_buf
 * (reads may complete out of order), assimilate whatever is now
 * contiguous into read_buf, and keep the prefetch window full.
 */
void Journaler::_finish_read(int r, uint64_t offset, uint64_t length,
			     bufferlist& bl)
{
  lock_guard l(lock);

  if (r < 0) {
    ldout(cct, 0) << "_finish_read got error " << r << dendl;
    error = r;
  } else {
    ldout(cct, 10) << "_finish_read got " << offset << "~" << bl.length()
		   << dendl;
    if (bl.length() < length) {
      // short read: treat as corruption
      ldout(cct, 0) << "_finish_read got less than expected (" << length << ")"
		    << dendl;
      error = -EINVAL;
    }
  }

  if (error) {
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(error);
    }
    return;
  }

  prefetch_buf[offset].swap(bl);

  try {
    _assimilate_prefetch();
  } catch (const buffer::error &err) {
    // decode failure while recomputing readability
    lderr(cct) << "_decode error from assimilate_prefetch" << dendl;
    error = -EINVAL;
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(error);
    }
    return;
  }
  _prefetch();
}
955
/*
 * Move contiguous buffers from prefetch_buf into read_buf, advancing
 * received_pos; stops at the first gap (reads complete out of order).
 * If anything was absorbed, recompute readability (which may throw
 * buffer::error on bad data — handled by the caller) and notify the
 * on_readable waiter when the journal becomes readable or we are at
 * the journal end.
 */
void Journaler::_assimilate_prefetch()
{
  bool was_readable = readable;

  bool got_any = false;
  while (!prefetch_buf.empty()) {
    map<uint64_t,bufferlist>::iterator p = prefetch_buf.begin();
    if (p->first != received_pos) {
      // gap before the next buffered read; wait for it to arrive
      uint64_t gap = p->first - received_pos;
      ldout(cct, 10) << "_assimilate_prefetch gap of " << gap
		     << " from received_pos " << received_pos
		     << " to first prefetched buffer " << p->first << dendl;
      break;
    }

    ldout(cct, 10) << "_assimilate_prefetch " << p->first << "~"
		   << p->second.length() << dendl;
    received_pos += p->second.length();
    read_buf.claim_append(p->second);
    assert(received_pos <= requested_pos);
    prefetch_buf.erase(p);
    got_any = true;
  }

  if (got_any) {
    ldout(cct, 10) << "_assimilate_prefetch read_buf now " << read_pos << "~"
		   << read_buf.length() << ", read pointers " << read_pos
		   << "/" << received_pos << "/" << requested_pos
		   << dendl;

    // Update readability (this will also hit any decode errors resulting
    // from bad data)
    readable = _is_readable();
  }

  if ((got_any && !was_readable && readable) || read_pos == write_pos) {
    // readable!
    ldout(cct, 10) << "_finish_read now readable (or at journal end) readable="
		   << readable << " read_pos=" << read_pos << " write_pos="
		   << write_pos << dendl;
    if (on_readable) {
      C_OnFinisher *f = on_readable;
      on_readable = 0;
      f->complete(0);
    }
  }
}
1003
/*
 * Issue OSD reads for @len bytes starting at requested_pos, never
 * beyond safe_pos.  If the reader has caught up with safe_pos, queue a
 * retry on waitfor_safe (triggering a flush first if none is in
 * flight).  Reads are issued one period (object) at a time so each
 * object's data can be assimilated as soon as it arrives.  Caller
 * holds the lock.
 */
void Journaler::_issue_read(uint64_t len)
{
  // stuck at safe_pos?  (this is needed if we are reading the tail of
  // a journal we are also writing to)
  assert(requested_pos <= safe_pos);
  if (requested_pos == safe_pos) {
    ldout(cct, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos
		   << ", waiting" << dendl;
    assert(write_pos > requested_pos);
    if (pending_safe.empty()) {
      _flush(NULL);
    }

    // Make sure keys of waitfor_safe map are journal entry boundaries.
    // The key we used here is either next_safe_pos or old value of
    // next_safe_pos. next_safe_pos is always set to journal entry
    // boundary.
    auto p = pending_safe.rbegin();
    if (p != pending_safe.rend())
      waitfor_safe[p->second].push_back(new C_RetryRead(this));
    else
      waitfor_safe[next_safe_pos].push_back(new C_RetryRead(this));
    return;
  }

  // don't read too much
  if (requested_pos + len > safe_pos) {
    len = safe_pos - requested_pos;
    ldout(cct, 10) << "_issue_read reading only up to safe_pos " << safe_pos
		   << dendl;
  }

  // go.
  ldout(cct, 10) << "_issue_read reading " << requested_pos << "~" << len
		 << ", read pointers " << read_pos << "/" << received_pos
		 << "/" << (requested_pos+len) << dendl;

  // step by period (object).  _don't_ do a single big filer.read()
  // here because it will wait for all object reads to complete before
  // giving us back any data.  this way we can process whatever bits
  // come in that are contiguous.
  uint64_t period = get_layout_period();
  while (len > 0) {
    uint64_t e = requested_pos + period;
    e -= e % period;              // end of the current period
    uint64_t l = e - requested_pos;
    if (l > len)
      l = len;
    C_Read *c = new C_Read(this, requested_pos, l);
    filer.read(ino, &layout, CEPH_NOSNAP, requested_pos, l, &c->bl, 0,
	       wrap_finisher(c), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
    requested_pos += l;
    len -= l;
  }
}
1059
/*
 * Keep the prefetch window full: issue reads up to fetch_len (or a
 * one-shot temp_fetch_len set by _is_readable for an oversized entry)
 * ahead of read_pos, rounded up to full periods and capped at
 * write_pos.  When reading our own unflushed tail, may trigger a flush
 * first so the data becomes safe to read.  Caller holds the lock.
 */
void Journaler::_prefetch()
{
  if (is_stopping())
    return;

  ldout(cct, 10) << "_prefetch" << dendl;
  // prefetch
  uint64_t pf;
  if (temp_fetch_len) {
    // one-shot larger window requested by _is_readable()
    ldout(cct, 10) << "_prefetch temp_fetch_len " << temp_fetch_len << dendl;
    pf = temp_fetch_len;
    temp_fetch_len = 0;
  } else {
    pf = fetch_len;
  }

  uint64_t raw_target = read_pos + pf;

  // read full log segments, so increase if necessary
  uint64_t period = get_layout_period();
  uint64_t remainder = raw_target % period;
  uint64_t adjustment = remainder ? period - remainder : 0;
  uint64_t target = raw_target + adjustment;

  // don't read past the log tail
  if (target > write_pos)
    target = write_pos;

  if (requested_pos < target) {
    uint64_t len = target - requested_pos;
    ldout(cct, 10) << "_prefetch " << pf << " requested_pos " << requested_pos
		   << " < target " << target << " (" << raw_target
		   << "), prefetching " << len << dendl;

    if (pending_safe.empty() && write_pos > safe_pos) {
      // If we are reading and writing the journal, then we may need
      // to issue a flush if one isn't already in progress.
      // Avoid doing a flush every time so that if we do write/read/write/read
      // we don't end up flushing after every write.
      ldout(cct, 10) << "_prefetch: requested_pos=" << requested_pos
		     << ", read_pos=" << read_pos
		     << ", write_pos=" << write_pos
		     << ", safe_pos=" << safe_pos << dendl;
      _do_flush();
    }

    _issue_read(len);
  }
}
1109
1110
1111 /*
1112 * _is_readable() - return true if next entry is ready.
1113 */
/*
 * Return true if a complete entry is ready at read_pos.
 *
 * Side effects when no entry is ready:
 *  - a partial entry at the very tail of the journal (everything up to
 *    write_pos already received) is interpreted as a torn write from a
 *    dead writer: the in-memory write-side pointers are truncated back
 *    to read_pos and the read state is reset;
 *  - an entry larger than the prefetch window records its size in
 *    temp_fetch_len so the next _prefetch() fetches enough.
 */
bool Journaler::_is_readable()
{
  // anything to read?
  if (read_pos == write_pos)
    return false;

  // Check if the retrieve bytestream has enough for an entry
  uint64_t need;
  if (journal_stream.readable(read_buf, &need)) {
    return true;
  }

  ldout (cct, 10) << "_is_readable read_buf.length() == " << read_buf.length()
		  << ", but need " << need << " for next entry; fetch_len is "
		  << fetch_len << dendl;

  // partial fragment at the end?
  if (received_pos == write_pos) {
    ldout(cct, 10) << "is_readable() detected partial entry at tail, "
      "adjusting write_pos to " << read_pos << dendl;

    // adjust write_pos
    prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = read_pos;
    assert(write_buf.length() == 0);
    assert(waitfor_safe.empty());

    // reset read state
    requested_pos = received_pos = read_pos;
    read_buf.clear();

    // FIXME: truncate on disk?

    return false;
  }

  if (need > fetch_len) {
    temp_fetch_len = need;
    ldout(cct, 10) << "_is_readable noting temp_fetch_len " << temp_fetch_len
		   << dendl;
  }

  ldout(cct, 10) << "_is_readable: not readable, returning false" << dendl;
  return false;
}
1158
1159 /*
1160 * is_readable() - kickstart prefetch, too
1161 */
1162 bool Journaler::is_readable()
1163 {
1164 lock_guard l(lock);
1165
1166 if (error != 0) {
1167 return false;
1168 }
1169
1170 bool r = readable;
1171 _prefetch();
1172 return r;
1173 }
1174
1175 class Journaler::C_EraseFinish : public Context {
1176 Journaler *journaler;
1177 C_OnFinisher *completion;
1178 public:
1179 C_EraseFinish(Journaler *j, C_OnFinisher *c) : journaler(j), completion(c) {}
1180 void finish(int r) override {
1181 journaler->_finish_erase(r, completion);
1182 }
1183 };
1184
1185 /**
1186 * Entirely erase the journal, including header. For use when you
1187 * have already made a copy of the journal somewhere else.
1188 */
1189 void Journaler::erase(Context *completion)
1190 {
1191 lock_guard l(lock);
1192
1193 // Async delete the journal data
1194 uint64_t first = trimmed_pos / get_layout_period();
1195 uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2;
1196 filer.purge_range(ino, &layout, SnapContext(), first, num,
1197 ceph::real_clock::now(), 0,
1198 wrap_finisher(new C_EraseFinish(
1199 this, wrap_finisher(completion))));
1200
1201 // We will not start the operation to delete the header until
1202 // _finish_erase has seen the data deletion succeed: otherwise if
1203 // there was an error deleting data we might prematurely delete the
1204 // header thereby lose our reference to the data.
1205 }
1206
1207 void Journaler::_finish_erase(int data_result, C_OnFinisher *completion)
1208 {
1209 lock_guard l(lock);
1210 if (is_stopping()) {
1211 completion->complete(-EAGAIN);
1212 return;
1213 }
1214
1215 if (data_result == 0) {
1216 // Async delete the journal header
1217 filer.purge_range(ino, &layout, SnapContext(), 0, 1,
1218 ceph::real_clock::now(),
1219 0, wrap_finisher(completion));
1220 } else {
1221 lderr(cct) << "Failed to delete journal " << ino << " data: "
1222 << cpp_strerror(data_result) << dendl;
1223 completion->complete(data_result);
1224 }
1225 }
1226
/* try_read_entry(bl)
 * read entry into bl if it's ready.
 * otherwise, do nothing.
 *
 * Returns true and advances read_pos past the consumed envelope on
 * success; returns false (leaving state untouched except possibly
 * 'error') when not readable or when the stream fails to decode.
 */
bool Journaler::try_read_entry(bufferlist& bl)
{
  lock_guard l(lock);

  if (!readable) {
    ldout(cct, 10) << "try_read_entry at " << read_pos << " not readable"
		   << dendl;
    return false;
  }

  uint64_t start_ptr;
  size_t consumed;
  try {
    // Splice the next entry payload out of read_buf into bl.
    consumed = journal_stream.read(read_buf, &bl, &start_ptr);
    if (stream_format >= JOURNAL_FORMAT_RESILIENT) {
      // Resilient format records each entry's own offset; it must match
      // where we think we are reading.
      assert(start_ptr == read_pos);
    }
  } catch (const buffer::error &e) {
    lderr(cct) << __func__ << ": decode error from journal_stream" << dendl;
    error = -EINVAL;
    return false;
  }

  ldout(cct, 10) << "try_read_entry at " << read_pos << " read "
		 << read_pos << "~" << consumed << " (have "
		 << read_buf.length() << ")" << dendl;

  read_pos += consumed;
  try {
    // We were readable, we might not be any more
    readable = _is_readable();
  } catch (const buffer::error &e) {
    lderr(cct) << __func__ << ": decode error from _is_readable" << dendl;
    error = -EINVAL;
    return false;
  }

  // prefetch?
  _prefetch();
  return true;
}
1272
1273 void Journaler::wait_for_readable(Context *onreadable)
1274 {
1275 lock_guard l(lock);
1276 if (is_stopping()) {
1277 finisher->queue(onreadable, -EAGAIN);
1278 return;
1279 }
1280
1281 assert(on_readable == 0);
1282 if (!readable) {
1283 ldout(cct, 10) << "wait_for_readable at " << read_pos << " onreadable "
1284 << onreadable << dendl;
1285 on_readable = wrap_finisher(onreadable);
1286 } else {
1287 // race with OSD reply
1288 finisher->queue(onreadable, 0);
1289 }
1290 }
1291
1292 bool Journaler::have_waiter() const
1293 {
1294 return on_readable != nullptr;
1295 }
1296
1297
1298
1299
1300 /***************** TRIMMING *******************/
1301
1302
1303 class Journaler::C_Trim : public Context {
1304 Journaler *ls;
1305 uint64_t to;
1306 public:
1307 C_Trim(Journaler *l, int64_t t) : ls(l), to(t) {}
1308 void finish(int r) override {
1309 ls->_finish_trim(r, to);
1310 }
1311 };
1312
// Public entry point for trimming: take the lock and delegate to _trim().
void Journaler::trim()
{
  lock_guard l(lock);
  _trim();
}
1318
// Delete journal objects that are entirely below the committed expire
// position.  Caller must hold 'lock'; no-op while stopping, if there is
// nothing new to trim, or while a previous trim is still in flight.
void Journaler::_trim()
{
  if (is_stopping())
    return;

  assert(!readonly);
  uint64_t period = get_layout_period();
  // Only trim to an object (period) boundary at or below the committed
  // expire position.
  uint64_t trim_to = last_committed.expire_pos;
  trim_to -= trim_to % period;
  ldout(cct, 10) << "trim last_commited head was " << last_committed
	   << ", can trim to " << trim_to
	   << dendl;
  if (trim_to == 0 || trim_to == trimming_pos) {
    ldout(cct, 10) << "trim already trimmed/trimming to "
		   << trimmed_pos << "/" << trimming_pos << dendl;
    return;
  }

  if (trimming_pos > trimmed_pos) {
    // An earlier purge has not completed yet (_finish_trim pending).
    ldout(cct, 10) << "trim already trimming atm, try again later.  "
      "trimmed/trimming is " << trimmed_pos << "/" << trimming_pos << dendl;
    return;
  }

  // trim
  assert(trim_to <= write_pos);
  assert(trim_to <= expire_pos);
  assert(trim_to > trimming_pos);
  ldout(cct, 10) << "trim trimming to " << trim_to
		 << ", trimmed/trimming/expire are "
		 << trimmed_pos << "/" << trimming_pos << "/" << expire_pos
		 << dendl;

  // delete range of objects
  uint64_t first = trimming_pos / period;
  uint64_t num = (trim_to - trimming_pos) / period;
  SnapContext snapc;
  filer.purge_range(ino, &layout, snapc, first, num,
		    ceph::real_clock::now(), 0,
		    wrap_finisher(new C_Trim(this, trim_to)));
  // Mark the range as trimming; trimmed_pos catches up in _finish_trim().
  trimming_pos = trim_to;
}
1361
// Completion of the purge issued by _trim(): advance trimmed_pos to 'to'
// on success.  -ENOENT is tolerated (objects already gone); any other
// error is routed through the write-error handler.
void Journaler::_finish_trim(int r, uint64_t to)
{
  lock_guard l(lock);

  assert(!readonly);
  ldout(cct, 10) << "_finish_trim trimmed_pos was " << trimmed_pos
	   << ", trimmed/trimming/expire now "
	   << to << "/" << trimming_pos << "/" << expire_pos
	   << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "_finish_trim got " << cpp_strerror(r) << dendl;
    handle_write_error(r);
    return;
  }

  // (Redundant with the early return above, kept as documentation.)
  assert(r >= 0 || r == -ENOENT);

  assert(to <= trimming_pos);
  assert(to > trimmed_pos);
  trimmed_pos = to;
}
1383
1384 void Journaler::handle_write_error(int r)
1385 {
1386 // lock is locked
1387
1388 lderr(cct) << "handle_write_error " << cpp_strerror(r) << dendl;
1389 if (on_write_error) {
1390 on_write_error->complete(r);
1391 on_write_error = NULL;
1392 called_write_error = true;
1393 } else if (called_write_error) {
1394 /* We don't call error handler more than once, subsequent errors
1395 * are dropped -- this is okay as long as the error handler does
1396 * something dramatic like respawn */
1397 lderr(cct) << __func__ << ": multiple write errors, handler already called"
1398 << dendl;
1399 } else {
1400 assert(0 == "unhandled write error");
1401 }
1402 }
1403
1404
1405 /**
1406 * Test whether the 'read_buf' byte stream has enough data to read
1407 * an entry
1408 *
1409 * sets 'next_envelope_size' to the number of bytes needed to advance (enough
1410 * to get the next header if header was unavailable, or enough to get the whole
1411 * next entry if the header was available but the body wasn't).
1412 */
1413 bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const
1414 {
1415 assert(need != NULL);
1416
1417 uint32_t entry_size = 0;
1418 uint64_t entry_sentinel = 0;
1419 bufferlist::iterator p = read_buf.begin();
1420
1421 // Do we have enough data to decode an entry prefix?
1422 if (format >= JOURNAL_FORMAT_RESILIENT) {
1423 *need = sizeof(entry_size) + sizeof(entry_sentinel);
1424 } else {
1425 *need = sizeof(entry_size);
1426 }
1427 if (read_buf.length() >= *need) {
1428 if (format >= JOURNAL_FORMAT_RESILIENT) {
1429 ::decode(entry_sentinel, p);
1430 if (entry_sentinel != sentinel) {
1431 throw buffer::malformed_input("Invalid sentinel");
1432 }
1433 }
1434
1435 ::decode(entry_size, p);
1436 } else {
1437 return false;
1438 }
1439
1440 // Do we have enough data to decode an entry prefix, payload and suffix?
1441 if (format >= JOURNAL_FORMAT_RESILIENT) {
1442 *need = JOURNAL_ENVELOPE_RESILIENT + entry_size;
1443 } else {
1444 *need = JOURNAL_ENVELOPE_LEGACY + entry_size;
1445 }
1446 if (read_buf.length() >= *need) {
1447 return true; // No more bytes needed
1448 }
1449
1450 return false;
1451 }
1452
1453
/**
 * Consume one entry from a journal byte stream 'from', splicing a
 * serialized LogEvent blob into 'entry'.
 *
 * 'entry' must be non null and point to an empty bufferlist.
 *
 * 'from' must contain sufficient valid data (i.e. readable is true).
 *
 * 'start_ptr' will be set to the entry's start pointer, if the collection
 * format provides it. It may not be null.
 *
 * @returns The number of bytes consumed from the `from` byte stream. Note
 * that this is not equal to the length of `entry`, which contains
 * the inner serialized LogEvent and not the envelope.
 */
size_t JournalStream::read(bufferlist &from, bufferlist *entry,
			   uint64_t *start_ptr)
{
  assert(start_ptr != NULL);
  assert(entry != NULL);
  assert(entry->length() == 0);

  uint32_t entry_size = 0;

  // Consume envelope prefix: entry_size and entry_sentinel
  bufferlist::iterator from_ptr = from.begin();
  if (format >= JOURNAL_FORMAT_RESILIENT) {
    uint64_t entry_sentinel = 0;
    ::decode(entry_sentinel, from_ptr);
    // Assertion instead of clean check because of precondition of this
    // fn is that readable() already passed
    assert(entry_sentinel == sentinel);
  }
  ::decode(entry_size, from_ptr);

  // Read out the payload
  from_ptr.copy(entry_size, *entry);

  // Consume the envelope suffix (start_ptr); legacy-format entries
  // carry no start pointer, so report 0 for them.
  if (format >= JOURNAL_FORMAT_RESILIENT) {
    ::decode(*start_ptr, from_ptr);
  } else {
    *start_ptr = 0;
  }

  // Trim the input buffer to discard the bytes we have consumed
  from.splice(0, from_ptr.get_off());

  return from_ptr.get_off();
}
1504
1505
1506 /**
1507 * Append one entry
1508 */
1509 size_t JournalStream::write(bufferlist &entry, bufferlist *to,
1510 uint64_t const &start_ptr)
1511 {
1512 assert(to != NULL);
1513
1514 uint32_t const entry_size = entry.length();
1515 if (format >= JOURNAL_FORMAT_RESILIENT) {
1516 ::encode(sentinel, *to);
1517 }
1518 ::encode(entry_size, *to);
1519 to->claim_append(entry);
1520 if (format >= JOURNAL_FORMAT_RESILIENT) {
1521 ::encode(start_ptr, *to);
1522 }
1523
1524 if (format >= JOURNAL_FORMAT_RESILIENT) {
1525 return JOURNAL_ENVELOPE_RESILIENT + entry_size;
1526 } else {
1527 return JOURNAL_ENVELOPE_LEGACY + entry_size;
1528 }
1529 }
1530
1531 /**
1532 * set write error callback
1533 *
1534 * Set a callback/context to trigger if we get a write error from
1535 * the objecter. This may be from an explicit request (e.g., flush)
1536 * or something async the journaler did on its own (e.g., journal
1537 * header update).
1538 *
1539 * It is only used once; if the caller continues to use the
1540 * Journaler and wants to hear about errors, it needs to reset the
1541 * error_handler.
1542 *
1543 * @param c callback/context to trigger on error
1544 */
1545 void Journaler::set_write_error_handler(Context *c) {
1546 lock_guard l(lock);
1547 assert(!on_write_error);
1548 on_write_error = wrap_finisher(c);
1549 called_write_error = false;
1550 }
1551
1552
1553 /**
1554 * Wrap a context in a C_OnFinisher, if it is non-NULL
1555 *
1556 * Utility function to avoid lots of error-prone and verbose
1557 * NULL checking on contexts passed in.
1558 */
1559 C_OnFinisher *Journaler::wrap_finisher(Context *c)
1560 {
1561 if (c != NULL) {
1562 return new C_OnFinisher(c, finisher);
1563 } else {
1564 return NULL;
1565 }
1566 }
1567
// Stop the journaler: mark it stopping, fail the pending reader (if any)
// with -EAGAIN, and flush out recovery/safety waiters with errors so no
// callback is left dangling.
void Journaler::shutdown()
{
  lock_guard l(lock);

  ldout(cct, 1) << __func__ << dendl;

  state = STATE_STOPPING;
  readable = false;

  // Kick out anyone reading from journal
  error = -EAGAIN;
  if (on_readable) {
    // Clear the member before completing, in case completion re-enters.
    C_OnFinisher *f = on_readable;
    on_readable = 0;
    f->complete(-EAGAIN);
  }

  // Fail anyone waiting for header recovery.
  list<Context*> ls;
  ls.swap(waitfor_recover);
  finish_contexts(cct, ls, -ESHUTDOWN);

  // Fail anyone waiting for data to become safe on disk.
  std::map<uint64_t, std::list<Context*> >::iterator i;
  for (i = waitfor_safe.begin(); i != waitfor_safe.end(); ++i) {
    finish_contexts(cct, i->second, -EAGAIN);
  }
  waitfor_safe.clear();
}
1595