1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
2 // vim: ts=8 sw=2 smarttab expandtab
7 #include <seastar/core/circular_buffer.hh>
8 #include <seastar/core/metrics.hh>
9 #include <seastar/core/shared_future.hh>
11 #include "include/buffer.h"
13 #include "crimson/common/errorator.h"
14 #include "crimson/os/seastore/segment_manager_group.h"
15 #include "crimson/os/seastore/segment_seq_allocator.h"
16 #include "crimson/os/seastore/journal/record_submitter.h"
17 #include "crimson/os/seastore/async_cleaner.h"
namespace crimson::os::seastore {
// Forward declaration — SegmentAllocator only stores a reference to it;
// the full definition is pulled in by the .cc file.
class SegmentProvider;
}
24 namespace crimson::os::seastore::journal
{
29 * Maintain an available segment for writes.
31 class SegmentAllocator
: public JournalAllocator
{
34 // SegmentAllocator specific methods
35 SegmentAllocator(JournalTrimmer
*trimmer
,
36 data_category_t category
,
39 SegmentSeqAllocator
&ssa
);
41 segment_id_t
get_segment_id() const {
43 return current_segment
->get_segment_id();
46 extent_len_t
get_max_write_length() const {
47 return sm_group
.get_segment_size() -
48 sm_group
.get_rounded_header_length() -
49 sm_group
.get_rounded_tail_length();
54 const std::string
& get_name() const final
{
58 extent_len_t
get_block_size() const final
{
59 return sm_group
.get_block_size();
62 bool can_write() const final
{
63 return !!current_segment
;
66 segment_nonce_t
get_nonce() const final
{
68 return current_segment_nonce
;
71 // returns true iff the current segment has insufficient space
72 bool needs_roll(std::size_t length
) const final
{
74 assert(current_segment
->get_write_capacity() ==
75 sm_group
.get_segment_size());
76 auto write_capacity
= current_segment
->get_write_capacity() -
77 sm_group
.get_rounded_tail_length();
78 return length
+ written_to
> std::size_t(write_capacity
);
81 // open for write and generate the correct print name
82 open_ret
open(bool is_mkfs
) final
;
84 // close the current segment and initialize next one
85 roll_ertr::future
<> roll() final
;
87 // write the buffer, return the write result
89 // May be called concurrently, but writes may complete in any order.
90 // If rolling/opening, no write is allowed.
91 write_ret
write(ceph::bufferlist
&& to_write
) final
;
93 using close_ertr
= base_ertr
;
94 close_ertr::future
<> close() final
;
96 void update_modify_time(record_t
& record
) final
{
97 segment_provider
.update_modify_time(
100 record
.extents
.size());
104 open_ret
do_open(bool is_mkfs
);
107 current_segment
.reset();
110 current_segment_nonce
= 0;
113 using close_segment_ertr
= base_ertr
;
114 close_segment_ertr::future
<> close_segment();
116 // device id is not available during construction,
117 // so generate the print_name later.
118 std::string print_name
;
119 const segment_type_t type
; // JOURNAL or OOL
120 const data_category_t category
;
121 const rewrite_gen_t gen
;
122 SegmentProvider
&segment_provider
;
123 SegmentManagerGroup
&sm_group
;
124 SegmentRef current_segment
;
125 segment_off_t written_to
;
126 SegmentSeqAllocator
&segment_seq_allocator
;
127 segment_nonce_t current_segment_nonce
;
128 JournalTrimmer
*trimmer
;