1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "common/perf_counters.h"
16 #include "common/dout.h"
17 #include "include/Context.h"
18 #include "msg/Messenger.h"
19 #include "osdc/Journaler.h"
20 #include "common/errno.h"
21 #include "include/assert.h"
22 #include "common/Finisher.h"
23
24 #define dout_subsys ceph_subsys_journaler
25 #undef dout_prefix
26 #define dout_prefix *_dout << objecter->messenger->get_myname() \
27 << ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
28
29 using std::chrono::seconds;
30
31
32 class Journaler::C_DelayFlush : public Context {
33 Journaler *journaler;
34 public:
35 C_DelayFlush(Journaler *j) : journaler(j) {}
36 void finish(int r) override {
37 journaler->_do_delayed_flush();
38 }
39 };
40
41 void Journaler::set_readonly()
42 {
43 lock_guard l(lock);
44
45 ldout(cct, 1) << "set_readonly" << dendl;
46 readonly = true;
47 }
48
49 void Journaler::set_writeable()
50 {
51 lock_guard l(lock);
52
53 ldout(cct, 1) << "set_writeable" << dendl;
54 readonly = false;
55 }
56
57 void Journaler::create(file_layout_t *l, stream_format_t const sf)
58 {
59 lock_guard lk(lock);
60
61 assert(!readonly);
62 state = STATE_ACTIVE;
63
64 stream_format = sf;
65 journal_stream.set_format(sf);
66 _set_layout(l);
67
68 prezeroing_pos = prezero_pos = write_pos = flush_pos =
69 safe_pos = read_pos = requested_pos = received_pos =
70 expire_pos = trimming_pos = trimmed_pos =
71 next_safe_pos = layout.get_period();
72
73 ldout(cct, 1) << "created blank journal at inode 0x" << std::hex << ino
74 << std::dec << ", format=" << stream_format << dendl;
75 }
76
77 void Journaler::set_layout(file_layout_t const *l)
78 {
79 lock_guard lk(lock);
80 _set_layout(l);
81 }
82
83 void Journaler::_set_layout(file_layout_t const *l)
84 {
85 layout = *l;
86
87 assert(layout.pool_id == pg_pool);
88 last_written.layout = layout;
89 last_committed.layout = layout;
90
91 // prefetch intelligently.
92 // (watch out, this is big if you use big objects or weird striping)
93 uint64_t periods = cct->_conf->journaler_prefetch_periods;
94 if (periods < 2)
95 periods = 2; // we need at least 2 periods to make progress.
96 fetch_len = layout.get_period() * periods;
97 }
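// Worked example of the prefetch sizing above (numbers are illustrative
// assumptions, not defaults read from this file): with 4 MiB objects and
// stripe_count 1, layout.get_period() is 4 MiB; if journaler_prefetch_periods
// is, say, 10, then fetch_len = 4 MiB * 10 = 40 MiB of journal is requested
// ahead of read_pos by _prefetch().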
98
99
100 /***************** HEADER *******************/
101
102 ostream& operator<<(ostream &out, const Journaler::Header &h)
103 {
104 return out << "loghead(trim " << h.trimmed_pos
105 << ", expire " << h.expire_pos
106 << ", write " << h.write_pos
107 << ", stream_format " << (int)(h.stream_format)
108 << ")";
109 }
110
111 class Journaler::C_ReadHead : public Context {
112 Journaler *ls;
113 public:
114 bufferlist bl;
115 explicit C_ReadHead(Journaler *l) : ls(l) {}
116 void finish(int r) override {
117 ls->_finish_read_head(r, bl);
118 }
119 };
120
121 class Journaler::C_RereadHead : public Context {
122 Journaler *ls;
123 Context *onfinish;
124 public:
125 bufferlist bl;
126 C_RereadHead(Journaler *l, Context *onfinish_) : ls(l),
127 onfinish(onfinish_) {}
128 void finish(int r) override {
129 ls->_finish_reread_head(r, bl, onfinish);
130 }
131 };
132
133 class Journaler::C_ProbeEnd : public Context {
134 Journaler *ls;
135 public:
136 uint64_t end;
137 explicit C_ProbeEnd(Journaler *l) : ls(l), end(-1) {}
138 void finish(int r) override {
139 ls->_finish_probe_end(r, end);
140 }
141 };
142
143 class Journaler::C_ReProbe : public Context {
144 Journaler *ls;
145 C_OnFinisher *onfinish;
146 public:
147 uint64_t end;
148 C_ReProbe(Journaler *l, C_OnFinisher *onfinish_) :
149 ls(l), onfinish(onfinish_), end(0) {}
150 void finish(int r) override {
151 ls->_finish_reprobe(r, end, onfinish);
152 }
153 };
154
155 void Journaler::recover(Context *onread)
156 {
157 lock_guard l(lock);
158 if (stopping) {
159 onread->complete(-EAGAIN);
160 return;
161 }
162
163 ldout(cct, 1) << "recover start" << dendl;
164 assert(state != STATE_ACTIVE);
165 assert(readonly);
166
167 if (onread)
168 waitfor_recover.push_back(wrap_finisher(onread));
169
170 if (state != STATE_UNDEF) {
171 ldout(cct, 1) << "recover - already recovering" << dendl;
172 return;
173 }
174
175 ldout(cct, 1) << "read_head" << dendl;
176 state = STATE_READHEAD;
177 C_ReadHead *fin = new C_ReadHead(this);
178 _read_head(fin, &fin->bl);
179 }
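// Typical recovery sequence, as a usage sketch (illustrative only; assumes a
// Journaler constructed and configured elsewhere, and uses C_SaferCond as a
// convenient blocking Context -- neither is defined in this file):
//
//   C_SaferCond cond;
//   journaler->recover(&cond);   // kicks off STATE_READHEAD, then STATE_PROBING
//   int r = cond.wait();         // fires once the journaler is STATE_ACTIVE,
//                                // or with an error (e.g. -EINVAL on a corrupt
//                                // header, -EAGAIN if the journaler is stopping)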
180
181 void Journaler::_read_head(Context *on_finish, bufferlist *bl)
182 {
183 // lock is locked
184 assert(state == STATE_READHEAD || state == STATE_REREADHEAD);
185
186 object_t oid = file_object_t(ino, 0);
187 object_locator_t oloc(pg_pool);
188 objecter->read_full(oid, oloc, CEPH_NOSNAP, bl, 0, wrap_finisher(on_finish));
189 }
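// Note: the head lives in the journal's first object (file_object_t(ino, 0))
// and is read/written whole; journal entries themselves start one layout
// period in, which is why create() initialises every position to
// layout.get_period().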
190
191 void Journaler::reread_head(Context *onfinish)
192 {
193 lock_guard l(lock);
194 _reread_head(wrap_finisher(onfinish));
195 }
196
197 /**
198 * Re-read the head from disk, and set the write_pos, expire_pos, trimmed_pos
199 * from the on-disk header. This switches the state to STATE_REREADHEAD for
200 * the duration, and you shouldn't start a re-read while other operations are
201 * in-flight, nor start other operations while a re-read is in progress.
202 * Also, don't call this until the Journaler has finished its recovery and has
203 * gone STATE_ACTIVE!
204 */
205 void Journaler::_reread_head(Context *onfinish)
206 {
207 ldout(cct, 10) << "reread_head" << dendl;
208 assert(state == STATE_ACTIVE);
209
210 state = STATE_REREADHEAD;
211 C_RereadHead *fin = new C_RereadHead(this, onfinish);
212 _read_head(fin, &fin->bl);
213 }
214
215 void Journaler::_finish_reread_head(int r, bufferlist& bl, Context *finish)
216 {
217 lock_guard l(lock);
218
219 // the on-disk header was read into 'bl' (empty only if the read failed)
220 assert(bl.length() || r < 0);
221
222 // unpack header
223 if (r == 0) {
224 Header h;
225 bufferlist::iterator p = bl.begin();
226 try {
227 ::decode(h, p);
228 } catch (const buffer::error &e) {
229 finish->complete(-EINVAL);
230 return;
231 }
232 prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
233 = h.write_pos;
234 expire_pos = h.expire_pos;
235 trimmed_pos = trimming_pos = h.trimmed_pos;
236 init_headers(h);
237 state = STATE_ACTIVE;
238 }
239
240 finish->complete(r);
241 }
242
243 void Journaler::_finish_read_head(int r, bufferlist& bl)
244 {
245 lock_guard l(lock);
246
247 assert(state == STATE_READHEAD);
248
249 if (r != 0) {
250 ldout(cct, 0) << "error getting journal off disk" << dendl;
251 list<Context*> ls;
252 ls.swap(waitfor_recover);
253 finish_contexts(cct, ls, r);
254 return;
255 }
256
257 if (bl.length() == 0) {
258 ldout(cct, 1) << "_finish_read_head r=" << r
259 << " read 0 bytes, assuming empty log" << dendl;
260 state = STATE_ACTIVE;
261 list<Context*> ls;
262 ls.swap(waitfor_recover);
263 finish_contexts(cct, ls, 0);
264 return;
265 }
266
267 // unpack header
268 bool corrupt = false;
269 Header h;
270 bufferlist::iterator p = bl.begin();
271 try {
272 ::decode(h, p);
273
274 if (h.magic != magic) {
275 ldout(cct, 0) << "on disk magic '" << h.magic << "' != my magic '"
276 << magic << "'" << dendl;
277 corrupt = true;
278 } else if (h.write_pos < h.expire_pos || h.expire_pos < h.trimmed_pos) {
279 ldout(cct, 0) << "Corrupt header (bad offsets): " << h << dendl;
280 corrupt = true;
281 }
282 } catch (const buffer::error &e) {
283 corrupt = true;
284 }
285
286 if (corrupt) {
287 list<Context*> ls;
288 ls.swap(waitfor_recover);
289 finish_contexts(cct, ls, -EINVAL);
290 return;
291 }
292
293 prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
294 = h.write_pos;
295 read_pos = requested_pos = received_pos = expire_pos = h.expire_pos;
296 trimmed_pos = trimming_pos = h.trimmed_pos;
297
298 init_headers(h);
299 _set_layout(&h.layout);
300 stream_format = h.stream_format;
301 journal_stream.set_format(h.stream_format);
302
303 ldout(cct, 1) << "_finish_read_head " << h
304 << ". probing for end of log (from " << write_pos << ")..."
305 << dendl;
306 C_ProbeEnd *fin = new C_ProbeEnd(this);
307 state = STATE_PROBING;
308 _probe(fin, &fin->end);
309 }
310
311 void Journaler::_probe(Context *finish, uint64_t *end)
312 {
313 // lock is locked
314 ldout(cct, 1) << "probing for end of the log" << dendl;
315 assert(state == STATE_PROBING || state == STATE_REPROBING);
316 // probe the log
317 filer.probe(ino, &layout, CEPH_NOSNAP,
318 write_pos, end, true, 0, wrap_finisher(finish));
319 }
320
321 void Journaler::_reprobe(C_OnFinisher *finish)
322 {
323 ldout(cct, 10) << "reprobe" << dendl;
324 assert(state == STATE_ACTIVE);
325
326 state = STATE_REPROBING;
327 C_ReProbe *fin = new C_ReProbe(this, finish);
328 _probe(fin, &fin->end);
329 }
330
331
332 void Journaler::_finish_reprobe(int r, uint64_t new_end,
333 C_OnFinisher *onfinish)
334 {
335 lock_guard l(lock);
336
337 assert(new_end >= write_pos || r < 0);
338 ldout(cct, 1) << "_finish_reprobe new_end = " << new_end
339 << " (header had " << write_pos << ")."
340 << dendl;
341 prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = new_end;
342 state = STATE_ACTIVE;
343 onfinish->complete(r);
344 }
345
346 void Journaler::_finish_probe_end(int r, uint64_t end)
347 {
348 lock_guard l(lock);
349
350 assert(state == STATE_PROBING);
351 if (r < 0) { // error in probing
352 goto out;
353 }
354 if (((int64_t)end) == -1) {
355 end = write_pos;
356 ldout(cct, 1) << "_finish_probe_end write_pos = " << end << " (header had "
357 << write_pos << "). log was empty. recovered." << dendl;
358 ceph_abort(); // hrm.
359 } else {
360 assert(end >= write_pos);
361 ldout(cct, 1) << "_finish_probe_end write_pos = " << end
362 << " (header had " << write_pos << "). recovered."
363 << dendl;
364 }
365
366 state = STATE_ACTIVE;
367
368 prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = end;
369
370 out:
371 // done.
372 list<Context*> ls;
373 ls.swap(waitfor_recover);
374 finish_contexts(cct, ls, r);
375 }
376
377 class Journaler::C_RereadHeadProbe : public Context
378 {
379 Journaler *ls;
380 C_OnFinisher *final_finish;
381 public:
382 C_RereadHeadProbe(Journaler *l, C_OnFinisher *finish) :
383 ls(l), final_finish(finish) {}
384 void finish(int r) override {
385 ls->_finish_reread_head_and_probe(r, final_finish);
386 }
387 };
388
389 void Journaler::reread_head_and_probe(Context *onfinish)
390 {
391 lock_guard l(lock);
392
393 assert(state == STATE_ACTIVE);
394 _reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish)));
395 }
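// Usage sketch for reread_head_and_probe() (illustrative; `journaler` and the
// use of C_SaferCond are assumptions, not taken from this file):
//
//   C_SaferCond cond;
//   journaler->reread_head_and_probe(&cond);  // STATE_REREADHEAD, then REPROBING
//   int r = cond.wait();                      // on success, write_pos/expire_pos/
//                                             // trimmed_pos reflect the on-disk
//                                             // head plus a fresh probe of the tail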
396
397 void Journaler::_finish_reread_head_and_probe(int r, C_OnFinisher *onfinish)
398 {
399 // Expect to be called back from finish_reread_head, which already takes lock
400 // lock is locked
401
402 assert(!r); //if we get an error, we're boned
403 _reprobe(onfinish);
404 }
405
406
407 // WRITING
408
409 class Journaler::C_WriteHead : public Context {
410 public:
411 Journaler *ls;
412 Header h;
413 C_OnFinisher *oncommit;
414 C_WriteHead(Journaler *l, Header& h_, C_OnFinisher *c) : ls(l), h(h_),
415 oncommit(c) {}
416 void finish(int r) override {
417 ls->_finish_write_head(r, h, oncommit);
418 }
419 };
420
421 void Journaler::write_head(Context *oncommit)
422 {
423 lock_guard l(lock);
424 _write_head(oncommit);
425 }
426
427
428 void Journaler::_write_head(Context *oncommit)
429 {
430 assert(!readonly);
431 assert(state == STATE_ACTIVE);
432 last_written.trimmed_pos = trimmed_pos;
433 last_written.expire_pos = expire_pos;
434 last_written.unused_field = expire_pos;
435 last_written.write_pos = safe_pos;
436 last_written.stream_format = stream_format;
437 ldout(cct, 10) << "write_head " << last_written << dendl;
438
439 // Avoid persisting bad pointers in case of bugs
440 assert(last_written.write_pos >= last_written.expire_pos);
441 assert(last_written.expire_pos >= last_written.trimmed_pos);
442
443 last_wrote_head = ceph::real_clock::now();
444
445 bufferlist bl;
446 ::encode(last_written, bl);
447 SnapContext snapc;
448
449 object_t oid = file_object_t(ino, 0);
450 object_locator_t oloc(pg_pool);
451 objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0,
452 wrap_finisher(new C_WriteHead(
453 this, last_written,
454 wrap_finisher(oncommit))),
455 0, 0, write_iohint);
456 }
457
458 void Journaler::_finish_write_head(int r, Header &wrote,
459 C_OnFinisher *oncommit)
460 {
461 lock_guard l(lock);
462
463 if (r < 0) {
464 lderr(cct) << "_finish_write_head got " << cpp_strerror(r) << dendl;
465 handle_write_error(r);
466 return;
467 }
468 assert(!readonly);
469 ldout(cct, 10) << "_finish_write_head " << wrote << dendl;
470 last_committed = wrote;
471 if (oncommit) {
472 oncommit->complete(r);
473 }
474
475 _trim(); // trim?
476 }
477
478
479 /***************** WRITING *******************/
480
481 class Journaler::C_Flush : public Context {
482 Journaler *ls;
483 uint64_t start;
484 ceph::real_time stamp;
485 public:
486 C_Flush(Journaler *l, uint64_t s, ceph::real_time st)
487 : ls(l), start(s), stamp(st) {}
488 void finish(int r) override {
489 ls->_finish_flush(r, start, stamp);
490 }
491 };
492
493 void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp)
494 {
495 lock_guard l(lock);
496 assert(!readonly);
497
498 if (r < 0) {
499 lderr(cct) << "_finish_flush got " << cpp_strerror(r) << dendl;
500 handle_write_error(r);
501 return;
502 }
503
504 assert(start < flush_pos);
505
506 // calc latency?
507 if (logger) {
508 ceph::timespan lat = ceph::real_clock::now() - stamp;
509 logger->tinc(logger_key_lat, lat);
510 }
511
512 // adjust safe_pos
513 auto it = pending_safe.find(start);
514 assert(it != pending_safe.end());
515 pending_safe.erase(it);
516 if (pending_safe.empty())
517 safe_pos = next_safe_pos;
518 else
519 safe_pos = pending_safe.begin()->second;
520
521 ldout(cct, 10) << "_finish_flush safe from " << start
522 << ", pending_safe " << pending_safe
523 << ", (prezeroing/prezero)/write/flush/safe positions now "
524 << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
525 << write_pos << "/" << flush_pos << "/" << safe_pos
526 << dendl;
527
528 // kick waiters <= safe_pos
529 while (!waitfor_safe.empty()) {
530 if (waitfor_safe.begin()->first > safe_pos)
531 break;
532 finish_contexts(cct, waitfor_safe.begin()->second);
533 waitfor_safe.erase(waitfor_safe.begin());
534 }
535 }
536
537
538
539 uint64_t Journaler::append_entry(bufferlist& bl)
540 {
541 unique_lock l(lock);
542
543 assert(!readonly);
544 uint32_t s = bl.length();
545
546 // append
547 size_t delta = bl.length() + journal_stream.get_envelope_size();
548 // write_buf space is nearly full
549 if (!write_buf_throttle.get_or_fail(delta)) {
550 l.unlock();
551 ldout(cct, 10) << "write_buf_throttle wait, delta " << delta << dendl;
552 write_buf_throttle.get(delta);
553 l.lock();
554 }
555 ldout(cct, 20) << "write_buf_throttle get, delta " << delta << dendl;
556 size_t wrote = journal_stream.write(bl, &write_buf, write_pos);
557 ldout(cct, 10) << "append_entry len " << s << " to " << write_pos << "~"
558 << wrote << dendl;
559 write_pos += wrote;
560
561 // flush previous object?
562 uint64_t su = get_layout_period();
563 assert(su > 0);
564 uint64_t write_off = write_pos % su;
565 uint64_t write_obj = write_pos / su;
566 uint64_t flush_obj = flush_pos / su;
567 if (write_obj != flush_obj) {
568 ldout(cct, 10) << " flushing completed object(s) (su " << su << " wro "
569 << write_obj << " flo " << flush_obj << ")" << dendl;
570 _do_flush(write_buf.length() - write_off);
571 if (write_off) {
572 // current entry isn't being flushed, set next_safe_pos to the end of previous entry
573 next_safe_pos = write_pos - wrote;
574 }
575 }
576
577 return write_pos;
578 }
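// Usage sketch for the write path (illustrative only; the payload encoding and
// C_SaferCond are assumptions, not part of this file):
//
//   bufferlist entry;
//   ::encode(some_event, entry);     // caller-defined payload
//   journaler->append_entry(entry);  // envelopes it and stages it in write_buf
//
//   C_SaferCond safe;
//   journaler->flush(&safe);         // push write_buf out to the OSDs
//   int r = safe.wait();             // fires once safe_pos covers the entry
//
// append_entry() only buffers (throttled by write_buf_throttle); nothing
// reaches the OSDs until _do_flush() issues the Filer write.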
579
580
581 void Journaler::_do_flush(unsigned amount)
582 {
583 if (write_pos == flush_pos)
584 return;
585 assert(write_pos > flush_pos);
586 assert(!readonly);
587
588 // flush
589 uint64_t len = write_pos - flush_pos;
590 assert(len == write_buf.length());
591 if (amount && amount < len)
592 len = amount;
593
594 // zero at least two full periods ahead. this ensures
595 // that the next object will not exist.
596 uint64_t period = get_layout_period();
597 if (flush_pos + len + 2*period > prezero_pos) {
598 _issue_prezero();
599
600 int64_t newlen = prezero_pos - flush_pos - period;
601 if (newlen <= 0) {
602 ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
603 << " already too close to prezero_pos " << prezero_pos
604 << ", zeroing first" << dendl;
605 waiting_for_zero = true;
606 return;
607 }
608 if (static_cast<uint64_t>(newlen) < len) {
609 ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
610 << " but hit prezero_pos " << prezero_pos
611 << ", will do " << flush_pos << "~" << newlen << dendl;
612 len = newlen;
613 } else {
614 waiting_for_zero = false;
615 }
616 } else {
617 waiting_for_zero = false;
618 }
619 ldout(cct, 10) << "_do_flush flushing " << flush_pos << "~" << len << dendl;
620
621 // submit write for anything pending
622 // the flush _start_ pos is passed via C_Flush to _finish_flush
623 ceph::real_time now = ceph::real_clock::now();
624 SnapContext snapc;
625
626 Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT
627 pending_safe[flush_pos] = next_safe_pos;
628
629 bufferlist write_bl;
630
631 // adjust pointers
632 if (len == write_buf.length()) {
633 write_bl.swap(write_buf);
634 next_safe_pos = write_pos;
635 } else {
636 write_buf.splice(0, len, &write_bl);
637 }
638
639 filer.write(ino, &layout, snapc,
640 flush_pos, len, write_bl, ceph::real_clock::now(),
641 0,
642 wrap_finisher(onsafe), write_iohint);
643
644 flush_pos += len;
645 assert(write_buf.length() == write_pos - flush_pos);
646 write_buf_throttle.put(len);
647 ldout(cct, 20) << "write_buf_throttle put, len " << len << dendl;
648
649 ldout(cct, 10)
650 << "_do_flush (prezeroing/prezero)/write/flush/safe pointers now at "
651 << "(" << prezeroing_pos << "/" << prezero_pos << ")/" << write_pos
652 << "/" << flush_pos << "/" << safe_pos << dendl;
653
654 _issue_prezero();
655 }
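// Invariants maintained by _do_flush(): write_buf always holds exactly the
// bytes in [flush_pos, write_pos); a flush never runs into unzeroed space, so
// if at most one full period of prezeroed bytes remains ahead of flush_pos
// the flush is deferred (waiting_for_zero) and re-issued from
// _finish_prezero(); and pending_safe maps each in-flight flush start offset
// to the next_safe_pos it will establish once the OSD acks the write.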
656
657
658 void Journaler::wait_for_flush(Context *onsafe)
659 {
660 lock_guard l(lock);
661 if (stopping) {
662 onsafe->complete(-EAGAIN);
663 return;
664 }
665 _wait_for_flush(onsafe);
666 }
667
668 void Journaler::_wait_for_flush(Context *onsafe)
669 {
670 assert(!readonly);
671
672 // all flushed and safe?
673 if (write_pos == safe_pos) {
674 assert(write_buf.length() == 0);
675 ldout(cct, 10)
676 << "flush nothing to flush, (prezeroing/prezero)/write/flush/safe "
677 "pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
678 << write_pos << "/" << flush_pos << "/" << safe_pos << dendl;
679 if (onsafe) {
680 finisher->queue(onsafe, 0);
681 }
682 return;
683 }
684
685 // queue waiter
686 if (onsafe) {
687 waitfor_safe[write_pos].push_back(wrap_finisher(onsafe));
688 }
689 }
690
691 void Journaler::flush(Context *onsafe)
692 {
693 lock_guard l(lock);
694 _flush(wrap_finisher(onsafe));
695 }
696
697 void Journaler::_flush(C_OnFinisher *onsafe)
698 {
699 assert(!readonly);
700
701 if (write_pos == flush_pos) {
702 assert(write_buf.length() == 0);
703 ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/"
704 "flush/safe pointers at " << "(" << prezeroing_pos << "/" << prezero_pos
705 << ")/" << write_pos << "/" << flush_pos << "/" << safe_pos
706 << dendl;
707 if (onsafe) {
708 onsafe->complete(0);
709 }
710 } else {
711 _do_flush();
712 _wait_for_flush(onsafe);
713 }
714
715 // write head?
716 if (_write_head_needed()) {
717 _write_head();
718 }
719 }
720
721 bool Journaler::_write_head_needed()
722 {
723 return last_wrote_head + seconds(cct->_conf->journaler_write_head_interval)
724 < ceph::real_clock::now();
725 }
726
727
728 /*************** prezeroing ******************/
729
730 struct C_Journaler_Prezero : public Context {
731 Journaler *journaler;
732 uint64_t from, len;
733 C_Journaler_Prezero(Journaler *j, uint64_t f, uint64_t l)
734 : journaler(j), from(f), len(l) {}
735 void finish(int r) override {
736 journaler->_finish_prezero(r, from, len);
737 }
738 };
739
740 void Journaler::_issue_prezero()
741 {
742 assert(prezeroing_pos >= flush_pos);
743
744 // we need to zero at least two periods, minimum, to ensure that we
745 // have a full empty object/period in front of us.
746 uint64_t num_periods = MAX(2, cct->_conf->journaler_prezero_periods);
747
748 /*
749 * issue zero requests based on write_pos, even though the invariant
750 * is that we zero ahead of flush_pos.
751 */
752 uint64_t period = get_layout_period();
753 uint64_t to = write_pos + period * num_periods + period - 1;
754 to -= to % period;
755
756 if (prezeroing_pos >= to) {
757 ldout(cct, 20) << "_issue_prezero target " << to << " <= prezeroing_pos "
758 << prezeroing_pos << dendl;
759 return;
760 }
761
762 while (prezeroing_pos < to) {
763 uint64_t len;
764 if (prezeroing_pos % period == 0) {
765 len = period;
766 ldout(cct, 10) << "_issue_prezero removing " << prezeroing_pos << "~"
767 << period << " (full period)" << dendl;
768 } else {
769 len = period - (prezeroing_pos % period);
770 ldout(cct, 10) << "_issue_prezero zeroing " << prezeroing_pos << "~"
771 << len << " (partial period)" << dendl;
772 }
773 SnapContext snapc;
774 Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos,
775 len));
776 filer.zero(ino, &layout, snapc, prezeroing_pos, len,
777 ceph::real_clock::now(), 0, c);
778 prezeroing_pos += len;
779 }
780 }
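// Worked example of the zeroing target above (one period = 4 units here for
// readability; a real period is object_size * stripe_count): with
// write_pos = 10 and num_periods = 2,
//
//   to = 10 + 4*2 + 4 - 1 = 21;  to -= 21 % 4  ->  to = 20
//
// i.e. zeroing is issued from prezeroing_pos up to offset 20, which leaves the
// two periods [12,16) and [16,20) fully zeroed beyond the period that
// currently contains write_pos.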
781
782 // Lock cycle because we get called out of objecter callback (holding
783 // objecter read lock), but there are also cases where we take the journaler
784 // lock before calling into objecter to do I/O.
785 void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len)
786 {
787 lock_guard l(lock);
788
789 ldout(cct, 10) << "_prezeroed to " << start << "~" << len
790 << ", prezeroing/prezero was " << prezeroing_pos << "/"
791 << prezero_pos << ", pending " << pending_zero
792 << dendl;
793 if (r < 0 && r != -ENOENT) {
794 lderr(cct) << "_prezeroed got " << cpp_strerror(r) << dendl;
795 handle_write_error(r);
796 return;
797 }
798
799 assert(r == 0 || r == -ENOENT);
800
801 if (start == prezero_pos) {
802 prezero_pos += len;
803 while (!pending_zero.empty() &&
804 pending_zero.begin().get_start() == prezero_pos) {
805 interval_set<uint64_t>::iterator b(pending_zero.begin());
806 prezero_pos += b.get_len();
807 pending_zero.erase(b);
808 }
809
810 if (waiting_for_zero) {
811 _do_flush();
812 }
813 } else {
814 pending_zero.insert(start, len);
815 }
816 ldout(cct, 10) << "_prezeroed prezeroing/prezero now " << prezeroing_pos
817 << "/" << prezero_pos
818 << ", pending " << pending_zero
819 << dendl;
820 }
821
822
823
824 /***************** READING *******************/
825
826
827 class Journaler::C_Read : public Context {
828 Journaler *ls;
829 uint64_t offset;
830 uint64_t length;
831 public:
832 bufferlist bl;
833 C_Read(Journaler *j, uint64_t o, uint64_t l) : ls(j), offset(o), length(l) {}
834 void finish(int r) override {
835 ls->_finish_read(r, offset, length, bl);
836 }
837 };
838
839 class Journaler::C_RetryRead : public Context {
840 Journaler *ls;
841 public:
842 explicit C_RetryRead(Journaler *l) : ls(l) {}
843
844 void finish(int r) override {
845 // Should only be called from waitfor_safe, i.e. already inside the lock
846 // (ls->lock is locked)
847 ls->_prefetch();
848 }
849 };
850
851 void Journaler::_finish_read(int r, uint64_t offset, uint64_t length,
852 bufferlist& bl)
853 {
854 lock_guard l(lock);
855
856 if (r < 0) {
857 ldout(cct, 0) << "_finish_read got error " << r << dendl;
858 error = r;
859 } else {
860 ldout(cct, 10) << "_finish_read got " << offset << "~" << bl.length()
861 << dendl;
862 if (bl.length() < length) {
863 ldout(cct, 0) << "_finish_read got less than expected (" << length << ")"
864 << dendl;
865 error = -EINVAL;
866 }
867 }
868
869 if (error) {
870 if (on_readable) {
871 C_OnFinisher *f = on_readable;
872 on_readable = 0;
873 f->complete(error);
874 }
875 return;
876 }
877
878 prefetch_buf[offset].swap(bl);
879
880 try {
881 _assimilate_prefetch();
882 } catch (const buffer::error &err) {
883 lderr(cct) << "_decode error from assimilate_prefetch" << dendl;
884 error = -EINVAL;
885 if (on_readable) {
886 C_OnFinisher *f = on_readable;
887 on_readable = 0;
888 f->complete(error);
889 }
890 return;
891 }
892 _prefetch();
893 }
894
895 void Journaler::_assimilate_prefetch()
896 {
897 bool was_readable = readable;
898
899 bool got_any = false;
900 while (!prefetch_buf.empty()) {
901 map<uint64_t,bufferlist>::iterator p = prefetch_buf.begin();
902 if (p->first != received_pos) {
903 uint64_t gap = p->first - received_pos;
904 ldout(cct, 10) << "_assimilate_prefetch gap of " << gap
905 << " from received_pos " << received_pos
906 << " to first prefetched buffer " << p->first << dendl;
907 break;
908 }
909
910 ldout(cct, 10) << "_assimilate_prefetch " << p->first << "~"
911 << p->second.length() << dendl;
912 received_pos += p->second.length();
913 read_buf.claim_append(p->second);
914 assert(received_pos <= requested_pos);
915 prefetch_buf.erase(p);
916 got_any = true;
917 }
918
919 if (got_any) {
920 ldout(cct, 10) << "_assimilate_prefetch read_buf now " << read_pos << "~"
921 << read_buf.length() << ", read pointers " << read_pos
922 << "/" << received_pos << "/" << requested_pos
923 << dendl;
924
925 // Update readability (this will also hit any decode errors resulting
926 // from bad data)
927 readable = _is_readable();
928 }
929
930 if ((got_any && !was_readable && readable) || read_pos == write_pos) {
931 // readable!
932 ldout(cct, 10) << "_finish_read now readable (or at journal end) readable="
933 << readable << " read_pos=" << read_pos << " write_pos="
934 << write_pos << dendl;
935 if (on_readable) {
936 C_OnFinisher *f = on_readable;
937 on_readable = 0;
938 f->complete(0);
939 }
940 }
941 }
942
943 void Journaler::_issue_read(uint64_t len)
944 {
945 // stuck at safe_pos? (this is needed if we are reading the tail of
946 // a journal we are also writing to)
947 assert(requested_pos <= safe_pos);
948 if (requested_pos == safe_pos) {
949 ldout(cct, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos
950 << ", waiting" << dendl;
951 assert(write_pos > requested_pos);
952 if (pending_safe.empty()) {
953 _flush(NULL);
954 }
955 waitfor_safe[flush_pos].push_back(new C_RetryRead(this));
956 return;
957 }
958
959 // don't read too much
960 if (requested_pos + len > safe_pos) {
961 len = safe_pos - requested_pos;
962 ldout(cct, 10) << "_issue_read reading only up to safe_pos " << safe_pos
963 << dendl;
964 }
965
966 // go.
967 ldout(cct, 10) << "_issue_read reading " << requested_pos << "~" << len
968 << ", read pointers " << read_pos << "/" << received_pos
969 << "/" << (requested_pos+len) << dendl;
970
971 // step by period (object). _don't_ do a single big filer.read()
972 // here because it will wait for all object reads to complete before
973 // giving us back any data. this way we can process whatever bits
974 // come in that are contiguous.
975 uint64_t period = get_layout_period();
976 while (len > 0) {
977 uint64_t e = requested_pos + period;
978 e -= e % period;
979 uint64_t l = e - requested_pos;
980 if (l > len)
981 l = len;
982 C_Read *c = new C_Read(this, requested_pos, l);
983 filer.read(ino, &layout, CEPH_NOSNAP, requested_pos, l, &c->bl, 0,
984 wrap_finisher(c), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
985 requested_pos += l;
986 len -= l;
987 }
988 }
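// Worked example of the per-period split above (period = 4 units for
// readability): a request for requested_pos = 6, len = 10 is issued as three
// object reads, 6~2, 8~4 and 12~4, so _finish_read()/_assimilate_prefetch()
// can hand back contiguous data as soon as the earliest object returns,
// instead of waiting for all of them.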
989
990 void Journaler::_prefetch()
991 {
992 ldout(cct, 10) << "_prefetch" << dendl;
993 // prefetch
994 uint64_t pf;
995 if (temp_fetch_len) {
996 ldout(cct, 10) << "_prefetch temp_fetch_len " << temp_fetch_len << dendl;
997 pf = temp_fetch_len;
998 temp_fetch_len = 0;
999 } else {
1000 pf = fetch_len;
1001 }
1002
1003 uint64_t raw_target = read_pos + pf;
1004
1005 // read full log segments, so increase if necessary
1006 uint64_t period = get_layout_period();
1007 uint64_t remainder = raw_target % period;
1008 uint64_t adjustment = remainder ? period - remainder : 0;
1009 uint64_t target = raw_target + adjustment;
1010
1011 // don't read past the log tail
1012 if (target > write_pos)
1013 target = write_pos;
1014
1015 if (requested_pos < target) {
1016 uint64_t len = target - requested_pos;
1017 ldout(cct, 10) << "_prefetch " << pf << " requested_pos " << requested_pos
1018 << " < target " << target << " (" << raw_target
1019 << "), prefetching " << len << dendl;
1020
1021 if (pending_safe.empty() && write_pos > safe_pos) {
1022 // If we are reading and writing the journal, then we may need
1023 // to issue a flush if one isn't already in progress.
1024 // Avoid doing a flush every time so that if we do write/read/write/read
1025 // we don't end up flushing after every write.
1026 ldout(cct, 10) << "_prefetch: requested_pos=" << requested_pos
1027 << ", read_pos=" << read_pos
1028 << ", write_pos=" << write_pos
1029 << ", safe_pos=" << safe_pos << dendl;
1030 _do_flush();
1031 }
1032
1033 _issue_read(len);
1034 }
1035 }
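// Worked example of the rounding above (period = 4 units): read_pos = 6 and
// pf = 9 give raw_target = 15; remainder 3 is rounded up by adjustment 1 to
// target = 16 (a period boundary), then clamped to write_pos, and only the
// part beyond requested_pos is actually issued via _issue_read().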
1036
1037
1038 /*
1039 * _is_readable() - return true if next entry is ready.
1040 */
1041 bool Journaler::_is_readable()
1042 {
1043 // anything to read?
1044 if (read_pos == write_pos)
1045 return false;
1046
1047 // Check if the retrieve bytestream has enough for an entry
1048 uint64_t need;
1049 if (journal_stream.readable(read_buf, &need)) {
1050 return true;
1051 }
1052
1053 ldout(cct, 10) << "_is_readable read_buf.length() == " << read_buf.length()
1054 << ", but need " << need << " for next entry; fetch_len is "
1055 << fetch_len << dendl;
1056
1057 // partial fragment at the end?
1058 if (received_pos == write_pos) {
1059 ldout(cct, 10) << "is_readable() detected partial entry at tail, "
1060 "adjusting write_pos to " << read_pos << dendl;
1061
1062 // adjust write_pos
1063 prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = read_pos;
1064 assert(write_buf.length() == 0);
1065
1066 // reset read state
1067 requested_pos = received_pos = read_pos;
1068 read_buf.clear();
1069
1070 // FIXME: truncate on disk?
1071
1072 return false;
1073 }
1074
1075 if (need > fetch_len) {
1076 temp_fetch_len = need;
1077 ldout(cct, 10) << "_is_readable noting temp_fetch_len " << temp_fetch_len
1078 << dendl;
1079 }
1080
1081 ldout(cct, 10) << "_is_readable: not readable, returning false" << dendl;
1082 return false;
1083 }
1084
1085 /*
1086 * is_readable() - kickstart prefetch, too
1087 */
1088 bool Journaler::is_readable()
1089 {
1090 lock_guard l(lock);
1091
1092 if (error != 0) {
1093 return false;
1094 }
1095
1096 bool r = readable;
1097 _prefetch();
1098 return r;
1099 }
1100
1101 class Journaler::C_EraseFinish : public Context {
1102 Journaler *journaler;
1103 C_OnFinisher *completion;
1104 public:
1105 C_EraseFinish(Journaler *j, C_OnFinisher *c) : journaler(j), completion(c) {}
1106 void finish(int r) override {
1107 journaler->_finish_erase(r, completion);
1108 }
1109 };
1110
1111 /**
1112 * Entirely erase the journal, including header. For use when you
1113 * have already made a copy of the journal somewhere else.
1114 */
1115 void Journaler::erase(Context *completion)
1116 {
1117 lock_guard l(lock);
1118
1119 // Async delete the journal data
1120 uint64_t first = trimmed_pos / get_layout_period();
1121 uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2;
1122 filer.purge_range(ino, &layout, SnapContext(), first, num,
1123 ceph::real_clock::now(), 0,
1124 wrap_finisher(new C_EraseFinish(
1125 this, wrap_finisher(completion))));
1126
1127 // We will not start the operation to delete the header until
1128 // _finish_erase has seen the data deletion succeed: otherwise if
1129 // there was an error deleting data we might prematurely delete the
1130 // header and thereby lose our reference to the data.
1131 }
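// Usage sketch (illustrative; `journaler` and C_SaferCond are assumptions,
// not taken from this file):
//
//   C_SaferCond done;
//   journaler->erase(&done);   // purge the data objects; _finish_erase then
//   int r = done.wait();       // purges the header object and completes `done`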
1132
1133 void Journaler::_finish_erase(int data_result, C_OnFinisher *completion)
1134 {
1135 lock_guard l(lock);
1136
1137 if (data_result == 0) {
1138 // Async delete the journal header
1139 filer.purge_range(ino, &layout, SnapContext(), 0, 1,
1140 ceph::real_clock::now(),
1141 0, wrap_finisher(completion));
1142 } else {
1143 lderr(cct) << "Failed to delete journal " << ino << " data: "
1144 << cpp_strerror(data_result) << dendl;
1145 completion->complete(data_result);
1146 }
1147 }
1148
1149 /* try_read_entry(bl)
1150 * read entry into bl if it's ready.
1151 * otherwise, do nothing.
1152 */
1153 bool Journaler::try_read_entry(bufferlist& bl)
1154 {
1155 lock_guard l(lock);
1156
1157 if (!readable) {
1158 ldout(cct, 10) << "try_read_entry at " << read_pos << " not readable"
1159 << dendl;
1160 return false;
1161 }
1162
1163 uint64_t start_ptr;
1164 size_t consumed;
1165 try {
1166 consumed = journal_stream.read(read_buf, &bl, &start_ptr);
1167 if (stream_format >= JOURNAL_FORMAT_RESILIENT) {
1168 assert(start_ptr == read_pos);
1169 }
1170 } catch (const buffer::error &e) {
1171 lderr(cct) << __func__ << ": decode error from journal_stream" << dendl;
1172 error = -EINVAL;
1173 return false;
1174 }
1175
1176 ldout(cct, 10) << "try_read_entry at " << read_pos << " read "
1177 << read_pos << "~" << consumed << " (have "
1178 << read_buf.length() << ")" << dendl;
1179
1180 read_pos += consumed;
1181 try {
1182 // We were readable, we might not be any more
1183 readable = _is_readable();
1184 } catch (const buffer::error &e) {
1185 lderr(cct) << __func__ << ": decode error from _is_readable" << dendl;
1186 error = -EINVAL;
1187 return false;
1188 }
1189
1190 // prefetch?
1191 _prefetch();
1192 return true;
1193 }
1194
1195 void Journaler::wait_for_readable(Context *onreadable)
1196 {
1197 lock_guard l(lock);
1198 if (stopping) {
1199 onreadable->complete(-EAGAIN);
1200 return;
1201 }
1202
1203 assert(on_readable == 0);
1204 if (!readable) {
1205 ldout(cct, 10) << "wait_for_readable at " << read_pos << " onreadable "
1206 << onreadable << dendl;
1207 on_readable = wrap_finisher(onreadable);
1208 } else {
1209 // race with OSD reply
1210 finisher->queue(onreadable, 0);
1211 }
1212 }
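// Usage sketch for the read path (illustrative only; the payload decoding and
// the waiter context are assumptions, not part of this file):
//
//   bufferlist entry;
//   while (journaler->is_readable() && journaler->try_read_entry(entry)) {
//     /* decode the caller-defined payload from `entry` */
//     entry.clear();   // try_read_entry() expects an empty bufferlist
//   }
//   // nothing readable right now: register interest and come back later
//   journaler->wait_for_readable(new C_OnJournalReadable);  // hypothetical Context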
1213
1214 bool Journaler::have_waiter() const
1215 {
1216 return on_readable != nullptr;
1217 }
1218
1219
1220
1221
1222 /***************** TRIMMING *******************/
1223
1224
1225 class Journaler::C_Trim : public Context {
1226 Journaler *ls;
1227 uint64_t to;
1228 public:
1229 C_Trim(Journaler *l, uint64_t t) : ls(l), to(t) {}
1230 void finish(int r) override {
1231 ls->_finish_trim(r, to);
1232 }
1233 };
1234
1235 void Journaler::trim()
1236 {
1237 lock_guard l(lock);
1238 _trim();
1239 }
1240
1241 void Journaler::_trim()
1242 {
1243 assert(!readonly);
1244 uint64_t period = get_layout_period();
1245 uint64_t trim_to = last_committed.expire_pos;
1246 trim_to -= trim_to % period;
1247 ldout(cct, 10) << "trim last_commited head was " << last_committed
1248 << ", can trim to " << trim_to
1249 << dendl;
1250 if (trim_to == 0 || trim_to == trimming_pos) {
1251 ldout(cct, 10) << "trim already trimmed/trimming to "
1252 << trimmed_pos << "/" << trimming_pos << dendl;
1253 return;
1254 }
1255
1256 if (trimming_pos > trimmed_pos) {
1257 ldout(cct, 10) << "trim already trimming atm, try again later. "
1258 "trimmed/trimming is " << trimmed_pos << "/" << trimming_pos << dendl;
1259 return;
1260 }
1261
1262 // trim
1263 assert(trim_to <= write_pos);
1264 assert(trim_to <= expire_pos);
1265 assert(trim_to > trimming_pos);
1266 ldout(cct, 10) << "trim trimming to " << trim_to
1267 << ", trimmed/trimming/expire are "
1268 << trimmed_pos << "/" << trimming_pos << "/" << expire_pos
1269 << dendl;
1270
1271 // delete range of objects
1272 uint64_t first = trimming_pos / period;
1273 uint64_t num = (trim_to - trimming_pos) / period;
1274 SnapContext snapc;
1275 filer.purge_range(ino, &layout, snapc, first, num,
1276 ceph::real_clock::now(), 0,
1277 wrap_finisher(new C_Trim(this, trim_to)));
1278 trimming_pos = trim_to;
1279 }
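// Worked example (period = 4 units): with last_committed.expire_pos = 14 the
// target rounds down to trim_to = 12; if trimming_pos was 4, purge_range() is
// asked to delete objects 1 and 2 (first = 4/4 = 1, num = (12-4)/4 = 2) and
// trimming_pos jumps to 12, confirmed as trimmed_pos by _finish_trim().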
1280
1281 void Journaler::_finish_trim(int r, uint64_t to)
1282 {
1283 lock_guard l(lock);
1284
1285 assert(!readonly);
1286 ldout(cct, 10) << "_finish_trim trimmed_pos was " << trimmed_pos
1287 << ", trimmed/trimming/expire now "
1288 << to << "/" << trimming_pos << "/" << expire_pos
1289 << dendl;
1290 if (r < 0 && r != -ENOENT) {
1291 lderr(cct) << "_finish_trim got " << cpp_strerror(r) << dendl;
1292 handle_write_error(r);
1293 return;
1294 }
1295
1296 assert(r >= 0 || r == -ENOENT);
1297
1298 assert(to <= trimming_pos);
1299 assert(to > trimmed_pos);
1300 trimmed_pos = to;
1301 }
1302
1303 void Journaler::handle_write_error(int r)
1304 {
1305 // lock is locked
1306
1307 lderr(cct) << "handle_write_error " << cpp_strerror(r) << dendl;
1308 if (on_write_error) {
1309 on_write_error->complete(r);
1310 on_write_error = NULL;
1311 called_write_error = true;
1312 } else if (called_write_error) {
1313 /* We don't call the error handler more than once; subsequent errors
1314 * are dropped -- this is okay as long as the error handler does
1315 * something dramatic like respawn */
1316 lderr(cct) << __func__ << ": multiple write errors, handler already called"
1317 << dendl;
1318 } else {
1319 assert(0 == "unhandled write error");
1320 }
1321 }
1322
1323
1324 /**
1325 * Test whether the 'read_buf' byte stream has enough data to read
1326 * an entry
1327 *
1328 * Sets '*need' to the number of bytes needed to advance (enough
1329 * to get the envelope prefix if it was unavailable, or enough to get the
1330 * whole next entry if the prefix was available but the body wasn't).
1331 */
1332 bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const
1333 {
1334 assert(need != NULL);
1335
1336 uint32_t entry_size = 0;
1337 uint64_t entry_sentinel = 0;
1338 bufferlist::iterator p = read_buf.begin();
1339
1340 // Do we have enough data to decode an entry prefix?
1341 if (format >= JOURNAL_FORMAT_RESILIENT) {
1342 *need = sizeof(entry_size) + sizeof(entry_sentinel);
1343 } else {
1344 *need = sizeof(entry_size);
1345 }
1346 if (read_buf.length() >= *need) {
1347 if (format >= JOURNAL_FORMAT_RESILIENT) {
1348 ::decode(entry_sentinel, p);
1349 if (entry_sentinel != sentinel) {
1350 throw buffer::malformed_input("Invalid sentinel");
1351 }
1352 }
1353
1354 ::decode(entry_size, p);
1355 } else {
1356 return false;
1357 }
1358
1359 // Do we have enough data to decode an entry prefix, payload and suffix?
1360 if (format >= JOURNAL_FORMAT_RESILIENT) {
1361 *need = JOURNAL_ENVELOPE_RESILIENT + entry_size;
1362 } else {
1363 *need = JOURNAL_ENVELOPE_LEGACY + entry_size;
1364 }
1365 if (read_buf.length() >= *need) {
1366 return true; // No more bytes needed
1367 }
1368
1369 return false;
1370 }
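// Example of the two-stage `need` above, for the resilient format (sizes
// follow from the u64 sentinel / u32 size / u64 start_ptr encoded by
// JournalStream::write() below): with an empty read_buf, *need is first set
// to 12 bytes (sentinel + size prefix); once the prefix is available and
// announces, say, a 100-byte payload, *need becomes
// JOURNAL_ENVELOPE_RESILIENT + 100, i.e. the full envelope including the
// trailing start_ptr.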
1371
1372
1373 /**
1374 * Consume one entry from a journal byte stream 'from', splicing a
1375 * serialized LogEvent blob into 'entry'.
1376 *
1377 * 'entry' must be non null and point to an empty bufferlist.
1378 *
1379 * 'from' must contain sufficient valid data (i.e. readable is true).
1380 *
1381 * 'start_ptr' will be set to the entry's start pointer, if the stream
1382 * format provides it (the resilient format does). It must not be null.
1383 *
1384 * @returns The number of bytes consumed from the `from` byte stream. Note
1385 * that this is not equal to the length of `entry`, which contains
1386 * the inner serialized LogEvent and not the envelope.
1387 */
1388 size_t JournalStream::read(bufferlist &from, bufferlist *entry,
1389 uint64_t *start_ptr)
1390 {
1391 assert(start_ptr != NULL);
1392 assert(entry != NULL);
1393 assert(entry->length() == 0);
1394
1395 uint32_t entry_size = 0;
1396
1397 // Consume envelope prefix: entry_size and entry_sentinel
1398 bufferlist::iterator from_ptr = from.begin();
1399 if (format >= JOURNAL_FORMAT_RESILIENT) {
1400 uint64_t entry_sentinel = 0;
1401 ::decode(entry_sentinel, from_ptr);
1402 // Assert rather than check cleanly, because this fn's precondition
1403 // is that readable() has already passed
1404 assert(entry_sentinel == sentinel);
1405 }
1406 ::decode(entry_size, from_ptr);
1407
1408 // Read out the payload
1409 from_ptr.copy(entry_size, *entry);
1410
1411 // Consume the envelope suffix (start_ptr)
1412 if (format >= JOURNAL_FORMAT_RESILIENT) {
1413 ::decode(*start_ptr, from_ptr);
1414 } else {
1415 *start_ptr = 0;
1416 }
1417
1418 // Trim the input buffer to discard the bytes we have consumed
1419 from.splice(0, from_ptr.get_off());
1420
1421 return from_ptr.get_off();
1422 }
1423
1424
1425 /**
1426 * Append one entry
1427 */
1428 size_t JournalStream::write(bufferlist &entry, bufferlist *to,
1429 uint64_t const &start_ptr)
1430 {
1431 assert(to != NULL);
1432
1433 uint32_t const entry_size = entry.length();
1434 if (format >= JOURNAL_FORMAT_RESILIENT) {
1435 ::encode(sentinel, *to);
1436 }
1437 ::encode(entry_size, *to);
1438 to->claim_append(entry);
1439 if (format >= JOURNAL_FORMAT_RESILIENT) {
1440 ::encode(start_ptr, *to);
1441 }
1442
1443 if (format >= JOURNAL_FORMAT_RESILIENT) {
1444 return JOURNAL_ENVELOPE_RESILIENT + entry_size;
1445 } else {
1446 return JOURNAL_ENVELOPE_LEGACY + entry_size;
1447 }
1448 }
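// On-disk envelope written above (resilient format; field sizes follow the
// ::encode calls here and the matching ::decode calls in read()/readable()):
//
//   | sentinel (u64) | entry_size (u32) | payload (entry_size bytes) | start_ptr (u64) |
//
// The legacy format keeps only the u32 entry_size prefix and the payload,
// which is why it returns JOURNAL_ENVELOPE_LEGACY + entry_size instead.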
1449
1450 /**
1451 * set write error callback
1452 *
1453 * Set a callback/context to trigger if we get a write error from
1454 * the objecter. This may be from an explicit request (e.g., flush)
1455 * or something async the journaler did on its own (e.g., journal
1456 * header update).
1457 *
1458 * It is only used once; if the caller continues to use the
1459 * Journaler and wants to hear about errors, it needs to reset the
1460 * error_handler.
1461 *
1462 * @param c callback/context to trigger on error
1463 */
1464 void Journaler::set_write_error_handler(Context *c) {
1465 lock_guard l(lock);
1466 assert(!on_write_error);
1467 on_write_error = wrap_finisher(c);
1468 called_write_error = false;
1469 }
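// Usage sketch (illustrative; C_HandleWriteError is a hypothetical Context
// for the example, not defined in Ceph):
//
//   struct C_HandleWriteError : public Context {
//     void finish(int r) override {
//       /* e.g. mark the journal damaged or respawn; see handle_write_error() */
//     }
//   };
//   journaler->set_write_error_handler(new C_HandleWriteError);
//
// The handler fires at most once; a caller that keeps using the Journaler
// after an error must install a new one.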
1470
1471
1472 /**
1473 * Wrap a context in a C_OnFinisher, if it is non-NULL
1474 *
1475 * Utility function to avoid lots of error-prone and verbose
1476 * NULL checking on contexts passed in.
1477 */
1478 C_OnFinisher *Journaler::wrap_finisher(Context *c)
1479 {
1480 if (c != NULL) {
1481 return new C_OnFinisher(c, finisher);
1482 } else {
1483 return NULL;
1484 }
1485 }
1486
1487 void Journaler::shutdown()
1488 {
1489 lock_guard l(lock);
1490
1491 ldout(cct, 1) << __func__ << dendl;
1492
1493 readable = false;
1494 stopping = true;
1495
1496 // Kick out anyone reading from journal
1497 error = -EAGAIN;
1498 if (on_readable) {
1499 C_OnFinisher *f = on_readable;
1500 on_readable = 0;
1501 f->complete(-EAGAIN);
1502 }
1503
1504 finish_contexts(cct, waitfor_recover, -ESHUTDOWN);
1505
1506 std::map<uint64_t, std::list<Context*> >::iterator i;
1507 for (i = waitfor_safe.begin(); i != waitfor_safe.end(); ++i) {
1508 finish_contexts(cct, i->second, -EAGAIN);
1509 }
1510 waitfor_safe.clear();
1511 }
1512