Source: git.proxmox.com mirror of ceph.git — ceph/src/rocksdb/db/version_set.h
(state as of the "update sources to ceph Nautilus 14.2.1" import).
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9 //
10 // The representation of a DBImpl consists of a set of Versions. The
11 // newest version is called "current". Older versions may be kept
12 // around to provide a consistent view to live iterators.
13 //
14 // Each Version keeps track of a set of Table files per level. The
15 // entire set of versions is maintained in a VersionSet.
16 //
17 // Version,VersionSet are thread-compatible, but require external
18 // synchronization on all accesses.
19
20 #pragma once
21 #include <atomic>
22 #include <deque>
23 #include <limits>
24 #include <map>
25 #include <memory>
26 #include <set>
27 #include <string>
28 #include <utility>
29 #include <vector>
30
31 #include "db/column_family.h"
32 #include "db/compaction.h"
33 #include "db/compaction_picker.h"
34 #include "db/dbformat.h"
35 #include "db/file_indexer.h"
36 #include "db/log_reader.h"
37 #include "db/range_del_aggregator.h"
38 #include "db/read_callback.h"
39 #include "db/table_cache.h"
40 #include "db/version_builder.h"
41 #include "db/version_edit.h"
42 #include "db/write_controller.h"
43 #include "monitoring/instrumented_mutex.h"
44 #include "options/db_options.h"
45 #include "port/port.h"
46 #include "rocksdb/env.h"
47
48 namespace rocksdb {
49
namespace log {
class Writer;
}

// Forward declarations: these types are used only by pointer/reference in
// this header, so declaring them avoids pulling in their full headers.
class Compaction;
class LogBuffer;
class LookupKey;
class MemTable;
class Version;
class VersionSet;
class WriteBufferManager;
class MergeContext;
class ColumnFamilySet;
class TableCache;
class MergeIteratorBuilder;
65
// Return the smallest index i such that file_level.files[i]->largest >= key.
// Return file_level.num_files if there is no such file.
// REQUIRES: "file_level.files" contains a sorted list of
// non-overlapping files.
extern int FindFile(const InternalKeyComparator& icmp,
                    const LevelFilesBrief& file_level, const Slice& key);

// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
// smallest==nullptr represents a key smaller than all keys in the DB.
// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, file_level.files[]
// contains disjoint ranges in sorted order.
extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
                                  bool disjoint_sorted_files,
                                  const LevelFilesBrief& file_level,
                                  const Slice* smallest_user_key,
                                  const Slice* largest_user_key);

// Generate LevelFilesBrief from vector<FdWithKeyRange*>
// Would copy smallest_key and largest_key data to sequential memory
// arena: Arena used to allocate the memory
extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
                                      const std::vector<FileMetaData*>& files,
                                      Arena* arena);
91
// Holds the table-file state of one Version: the list of files per level
// plus derived bookkeeping (per-level briefs, file indexer, compaction
// scores, accumulated stats) used to pick compactions and serve reads.
// Thread-compatible: callers provide external synchronization (DB mutex),
// as noted on individual members.
class VersionStorageInfo {
 public:
  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
                     const Comparator* user_comparator, int num_levels,
                     CompactionStyle compaction_style,
                     VersionStorageInfo* src_vstorage,
                     bool _force_consistency_checks);
  ~VersionStorageInfo();

  // Pre-allocate capacity for `size` file entries at `level`.
  void Reserve(int level, size_t size) { files_[level].reserve(size); }

  // Append file `f` to `level`. `info_log`, when non-null, may be used for
  // diagnostics.
  void AddFile(int level, FileMetaData* f, Logger* info_log = nullptr);

  // Mark this object as finalized; accessors that assert finalized_ become
  // legal to call afterwards.
  void SetFinalized();

  // Update num_non_empty_levels_.
  void UpdateNumNonEmptyLevels();

  // Rebuild file_indexer_ over the current files_ (allocates from arena_).
  void GenerateFileIndexer() {
    file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
  }

  // Update the accumulated stats from a file-meta.
  void UpdateAccumulatedStats(FileMetaData* file_meta);

  // Decrease the current stat from a to-be-deleted file-meta
  void RemoveCurrentStats(FileMetaData* file_meta);

  // Recompute the compensated file sizes used by compaction priority.
  void ComputeCompensatedSizes();

  // Updates internal structures that keep track of compaction scores
  // We use compaction scores to figure out which compaction to do next
  // REQUIRES: db_mutex held!!
  // TODO find a better way to pass compaction_options_fifo.
  void ComputeCompactionScore(const ImmutableCFOptions& immutable_cf_options,
                              const MutableCFOptions& mutable_cf_options);

  // Estimate estimated_compaction_needed_bytes_
  void EstimateCompactionBytesNeeded(
      const MutableCFOptions& mutable_cf_options);

  // This computes files_marked_for_compaction_ and is called by
  // ComputeCompactionScore()
  void ComputeFilesMarkedForCompaction();

  // This computes expired_ttl_files_ and is called by
  // ComputeCompactionScore()
  void ComputeExpiredTtlFiles(const ImmutableCFOptions& ioptions,
                              const uint64_t ttl);

  // This computes bottommost_files_marked_for_compaction_ and is called by
  // ComputeCompactionScore() or UpdateOldestSnapshot().
  //
  // Among bottommost files (assumes they've already been computed), marks the
  // ones that have keys that would be eliminated if recompacted, according to
  // the seqnum of the oldest existing snapshot. Must be called every time
  // oldest snapshot changes as that is when bottom-level files can become
  // eligible for compaction.
  //
  // REQUIRES: DB mutex held
  void ComputeBottommostFilesMarkedForCompaction();

  // Generate level_files_brief_ from files_
  void GenerateLevelFilesBrief();
  // Sort all files for this version based on their file size and
  // record results in files_by_compaction_pri_. The largest files are listed
  // first.
  void UpdateFilesByCompactionPri(CompactionPri compaction_pri);

  // Compute level0_non_overlapping_ from the current L0 files.
  void GenerateLevel0NonOverlapping();
  bool level0_non_overlapping() const {
    return level0_non_overlapping_;
  }

  // Check whether each file in this version is bottommost (i.e., nothing in
  // its key-range could possibly exist in an older file/level).
  // REQUIRES: This version has not been saved
  void GenerateBottommostFiles();

  // Updates the oldest snapshot and related internal state, like the
  // bottommost files marked for compaction.
  // REQUIRES: DB mutex held
  void UpdateOldestSnapshot(SequenceNumber oldest_snapshot_seqnum);

  int MaxInputLevel() const;
  int MaxOutputLevel(bool allow_ingest_behind) const;

  // Return level number that has idx'th highest score
  int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; }

  // Return idx'th highest score
  double CompactionScore(int idx) const { return compaction_score_[idx]; }

  void GetOverlappingInputs(
      int level, const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,               // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,        // index of overlap file
      int* file_index = nullptr,  // return index of overlap file
      bool expand_range = true,   // if set, returns files which overlap the
                                  // range and overlap each other. If false,
                                  // then just files intersecting the range
      InternalKey** next_smallest = nullptr)  // if non-null, returns the
      const;  // smallest key of next file not included

  // Like GetOverlappingInputs, but only returns files fully contained in
  // [begin, end] (no expansion of the input range).
  void GetCleanInputsWithinInterval(
      int level, const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,               // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,         // index of overlap file
      int* file_index = nullptr)  // return index of overlap file
      const;

  void GetOverlappingInputsRangeBinarySearch(
      int level,                 // level > 0
      const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,    // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index,                 // index of overlap file
      int* file_index,                // return index of overlap file
      bool within_interval = false,   // if set, force the inputs within interval
      InternalKey** next_smallest = nullptr)  // if non-null, returns the
      const;  // smallest key of next file not included

  void ExtendFileRangeOverlappingInterval(
      int level,
      const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,    // nullptr means after all keys
      unsigned int index,        // start extending from this index
      int* startIndex,           // return the startIndex of input range
      int* endIndex)             // return the endIndex of input range
      const;

  void ExtendFileRangeWithinInterval(
      int level,
      const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,    // nullptr means after all keys
      unsigned int index,        // start extending from this index
      int* startIndex,           // return the startIndex of input range
      int* endIndex)             // return the endIndex of input range
      const;

  // Returns true iff some file in the specified level overlaps
  // some part of [*smallest_user_key,*largest_user_key].
  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
  // largest_user_key==NULL represents a key larger than all keys in the DB.
  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                      const Slice* largest_user_key);

  // Returns true iff the first or last file in inputs contains
  // an overlapping user key to the file "just outside" of it (i.e.
  // just after the last file, or just before the first file)
  // REQUIRES: "*inputs" is a sorted list of non-overlapping files
  bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
                             int level);

  int num_levels() const { return num_levels_; }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  int num_non_empty_levels() const {
    assert(finalized_);
    return num_non_empty_levels_;
  }

  // REQUIRES: This version has been finalized.
  // (CalculateBaseBytes() is called)
  // This may or may not return number of level files. It is to keep backward
  // compatible behavior in universal compaction.
  int l0_delay_trigger_count() const { return l0_delay_trigger_count_; }

  void set_l0_delay_trigger_count(int v) { l0_delay_trigger_count_ = v; }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  int NumLevelFiles(int level) const {
    assert(finalized_);
    return static_cast<int>(files_[level].size());
  }

  // Return the combined file size of all files at the specified level.
  uint64_t NumLevelBytes(int level) const;

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const std::vector<FileMetaData*>& LevelFiles(int level) const {
    return files_[level];
  }

  const rocksdb::LevelFilesBrief& LevelFilesBrief(int level) const {
    assert(level < static_cast<int>(level_files_brief_.size()));
    return level_files_brief_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const std::vector<int>& FilesByCompactionPri(int level) const {
    assert(finalized_);
    return files_by_compaction_pri_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  // REQUIRES: DB mutex held during access
  const autovector<std::pair<int, FileMetaData*>>& FilesMarkedForCompaction()
      const {
    assert(finalized_);
    return files_marked_for_compaction_;
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  // REQUIRES: DB mutex held during access
  const autovector<std::pair<int, FileMetaData*>>& ExpiredTtlFiles() const {
    assert(finalized_);
    return expired_ttl_files_;
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  // REQUIRES: DB mutex held during access
  const autovector<std::pair<int, FileMetaData*>>&
  BottommostFilesMarkedForCompaction() const {
    assert(finalized_);
    return bottommost_files_marked_for_compaction_;
  }

  int base_level() const { return base_level_; }

  // REQUIRES: lock is held
  // Set the index that is used to offset into files_by_compaction_pri_ to find
  // the next compaction candidate file.
  void SetNextCompactionIndex(int level, int index) {
    next_file_to_compact_by_size_[level] = index;
  }

  // REQUIRES: lock is held
  int NextCompactionIndex(int level) const {
    return next_file_to_compact_by_size_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const FileIndexer& file_indexer() const {
    assert(finalized_);
    return file_indexer_;
  }

  // Only the first few entries of files_by_compaction_pri_ are sorted.
  // There is no need to sort all the files because it is likely
  // that on a running system, we need to look at only the first
  // few largest files because a new version is created every few
  // seconds/minutes (because of concurrent compactions).
  static const size_t kNumberFilesToSort = 50;

  // Return a human-readable short (single-line) summary of the number
  // of files per level. Uses *scratch as backing store.
  struct LevelSummaryStorage {
    char buffer[1000];
  };
  struct FileSummaryStorage {
    char buffer[3000];
  };
  const char* LevelSummary(LevelSummaryStorage* scratch) const;
  // Return a human-readable short (single-line) summary of files
  // in a specified level. Uses *scratch as backing store.
  const char* LevelFileSummary(FileSummaryStorage* scratch, int level) const;

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t MaxNextLevelOverlappingBytes();

  // Return a human readable string that describes this version's contents.
  std::string DebugString(bool hex = false) const;

  // Estimated average value size, derived from the accumulated sampled stats.
  // Returns 0 when no non-deletion entries have been accumulated.
  uint64_t GetAverageValueSize() const {
    if (accumulated_num_non_deletions_ == 0) {
      return 0;
    }
    assert(accumulated_raw_key_size_ + accumulated_raw_value_size_ > 0);
    assert(accumulated_file_size_ > 0);
    return accumulated_raw_value_size_ / accumulated_num_non_deletions_ *
           accumulated_file_size_ /
           (accumulated_raw_key_size_ + accumulated_raw_value_size_);
  }

  uint64_t GetEstimatedActiveKeys() const;

  double GetEstimatedCompressionRatioAtLevel(int level) const;

  // re-initializes the index that is used to offset into
  // files_by_compaction_pri_
  // to find the next compaction candidate file.
  void ResetNextCompactionIndex(int level) {
    next_file_to_compact_by_size_[level] = 0;
  }

  const InternalKeyComparator* InternalComparator() {
    return internal_comparator_;
  }

  // Returns maximum total bytes of data on a given level.
  uint64_t MaxBytesForLevel(int level) const;

  // Must be called after any change to MutableCFOptions.
  void CalculateBaseBytes(const ImmutableCFOptions& ioptions,
                          const MutableCFOptions& options);

  // Returns an estimate of the amount of live data in bytes.
  uint64_t EstimateLiveDataSize() const;

  uint64_t estimated_compaction_needed_bytes() const {
    return estimated_compaction_needed_bytes_;
  }

  // Test hook: force the estimated-compaction-bytes value.
  void TEST_set_estimated_compaction_needed_bytes(uint64_t v) {
    estimated_compaction_needed_bytes_ = v;
  }

  bool force_consistency_checks() const { return force_consistency_checks_; }

  // Returns whether any key in [`smallest_key`, `largest_key`] could appear in
  // an older L0 file than `last_l0_idx` or in a greater level than `last_level`
  //
  // @param last_level Level after which we check for overlap
  // @param last_l0_idx If `last_level == 0`, index of L0 file after which we
  //    check for overlap; otherwise, must be -1
  bool RangeMightExistAfterSortedRun(const Slice& smallest_key,
                                     const Slice& largest_key, int last_level,
                                     int last_l0_idx);

 private:
  const InternalKeyComparator* internal_comparator_;
  const Comparator* user_comparator_;
  int num_levels_;            // Number of levels
  int num_non_empty_levels_;  // Number of levels. Any level larger than it
                              // is guaranteed to be empty.
  // Per-level max bytes
  std::vector<uint64_t> level_max_bytes_;

  // A short brief metadata of files per level
  autovector<rocksdb::LevelFilesBrief> level_files_brief_;
  FileIndexer file_indexer_;
  Arena arena_;  // Used to allocate space for file_levels_

  CompactionStyle compaction_style_;

  // List of files per level, files in each level are arranged
  // in increasing order of keys
  std::vector<FileMetaData*>* files_;

  // Level that L0 data should be compacted to. All levels < base_level_ should
  // be empty. -1 if it is not level-compaction so it's not applicable.
  int base_level_;

  // A list for the same set of files that are stored in files_,
  // but files in each level are now sorted based on file
  // size. The file with the largest size is at the front.
  // This vector stores the index of the file from files_.
  std::vector<std::vector<int>> files_by_compaction_pri_;

  // If true, means that files in L0 have keys with non overlapping ranges
  bool level0_non_overlapping_;

  // An index into files_by_compaction_pri_ that specifies the first
  // file that is not yet compacted
  std::vector<int> next_file_to_compact_by_size_;

  // Only the first few entries of files_by_compaction_pri_ are sorted.
  // There is no need to sort all the files because it is likely
  // that on a running system, we need to look at only the first
  // few largest files because a new version is created every few
  // seconds/minutes (because of concurrent compactions).
  // NOTE(review): this duplicates the public kNumberFilesToSort above with
  // the same value; confirm which one the implementation actually uses.
  static const size_t number_of_files_to_sort_ = 50;

  // This vector contains list of files marked for compaction and also not
  // currently being compacted. It is protected by DB mutex. It is calculated in
  // ComputeCompactionScore()
  autovector<std::pair<int, FileMetaData*>> files_marked_for_compaction_;

  // Files whose TTL has expired; computed in ComputeExpiredTtlFiles().
  autovector<std::pair<int, FileMetaData*>> expired_ttl_files_;

  // These files are considered bottommost because none of their keys can exist
  // at lower levels. They are not necessarily all in the same level. The marked
  // ones are eligible for compaction because they contain duplicate key
  // versions that are no longer protected by snapshot. These variables are
  // protected by DB mutex and are calculated in `GenerateBottommostFiles()` and
  // `ComputeBottommostFilesMarkedForCompaction()`.
  autovector<std::pair<int, FileMetaData*>> bottommost_files_;
  autovector<std::pair<int, FileMetaData*>>
      bottommost_files_marked_for_compaction_;

  // Threshold for needing to mark another bottommost file. Maintain it so we
  // can quickly check when releasing a snapshot whether more bottommost files
  // became eligible for compaction. It's defined as the min of the max nonzero
  // seqnums of unmarked bottommost files.
  SequenceNumber bottommost_files_mark_threshold_ = kMaxSequenceNumber;

  // Monotonically increases as we release old snapshots. Zero indicates no
  // snapshots have been released yet. When no snapshots remain we set it to the
  // current seqnum, which needs to be protected as a snapshot can still be
  // created that references it.
  SequenceNumber oldest_snapshot_seqnum_ = 0;

  // Level that should be compacted next and its compaction score.
  // Score < 1 means compaction is not strictly needed. These fields
  // are initialized by Finalize().
  // The most critical level to be compacted is listed first
  // These are used to pick the best compaction level
  std::vector<double> compaction_score_;
  std::vector<int> compaction_level_;
  int l0_delay_trigger_count_ = 0;  // Count used to trigger slow down and stop
                                    // for number of L0 files.

  // the following are the sampled temporary stats.
  // the current accumulated size of sampled files.
  uint64_t accumulated_file_size_;
  // the current accumulated size of all raw keys based on the sampled files.
  uint64_t accumulated_raw_key_size_;
  // the current accumulated size of all raw values based on the sampled files.
  uint64_t accumulated_raw_value_size_;
  // total number of non-deletion entries
  uint64_t accumulated_num_non_deletions_;
  // total number of deletion entries
  uint64_t accumulated_num_deletions_;
  // current number of non_deletion entries
  uint64_t current_num_non_deletions_;
  // current number of deletion entries
  uint64_t current_num_deletions_;
  // current number of file samples
  uint64_t current_num_samples_;
  // Estimated bytes needed to be compacted until all levels' size is down to
  // target sizes.
  uint64_t estimated_compaction_needed_bytes_;

  // Set once SetFinalized() runs; guards accessors that require a saved
  // version (checked via assert()).
  bool finalized_;

  // If set to true, we will run consistency checks even if RocksDB
  // is compiled in release mode
  bool force_consistency_checks_;

  friend class Version;
  friend class VersionSet;
  // No copying allowed
  VersionStorageInfo(const VersionStorageInfo&) = delete;
  void operator=(const VersionStorageInfo&) = delete;
};
530
// One immutable snapshot of a column family's LSM tree. Versions form a
// doubly-linked list inside a VersionSet; the newest is "current". A Version
// is kept alive by reference counting (Ref()/Unref()) so live iterators see
// a consistent view.
class Version {
 public:
  // Append to *iters a sequence of iterators that will
  // yield the contents of this Version when merged together.
  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  void AddIterators(const ReadOptions&, const EnvOptions& soptions,
                    MergeIteratorBuilder* merger_iter_builder,
                    RangeDelAggregator* range_del_agg);

  // Same as AddIterators, but only for the files of a single level.
  void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions,
                            MergeIteratorBuilder* merger_iter_builder,
                            int level, RangeDelAggregator* range_del_agg);

  // Sets *overlap to whether [smallest_user_key, largest_user_key] overlaps
  // any file at `level`.
  Status OverlapWithLevelIterator(const ReadOptions&, const EnvOptions&,
                                  const Slice& smallest_user_key,
                                  const Slice& largest_user_key,
                                  int level, bool* overlap);

  // Lookup the value for key. If found, store it in *val and
  // return OK. Else return a non-OK status.
  // Uses *operands to store merge_operator operations to apply later.
  //
  // If the ReadOptions.read_tier is set to do a read-only fetch, then
  // *value_found will be set to false if it cannot be determined whether
  // this value exists without doing IO.
  //
  // If the key is Deleted, *status will be set to NotFound and
  // *key_exists will be set to true.
  // If no key was found, *status will be set to NotFound and
  // *key_exists will be set to false.
  // If seq is non-null, *seq will be set to the sequence number found
  // for the key if a key was found.
  //
  // REQUIRES: lock is not held
  void Get(const ReadOptions&, const LookupKey& key, PinnableSlice* value,
           Status* status, MergeContext* merge_context,
           RangeDelAggregator* range_del_agg, bool* value_found = nullptr,
           bool* key_exists = nullptr, SequenceNumber* seq = nullptr,
           ReadCallback* callback = nullptr, bool* is_blob = nullptr);

  // Loads some stats information from files. Call without mutex held. It needs
  // to be called before applying the version to the version set.
  void PrepareApply(const MutableCFOptions& mutable_cf_options,
                    bool update_stats);

  // Reference count management (so Versions do not disappear out from
  // under live iterators)
  void Ref();
  // Decrease reference count. Delete the object if no reference left
  // and return true. Otherwise, return false.
  bool Unref();

  // Add all files listed in the current version to *live.
  void AddLiveFiles(std::vector<FileDescriptor>* live);

  // Return a human readable string that describes this version's contents.
  std::string DebugString(bool hex = false, bool print_stats = false) const;

  // Returns the version number of this version
  uint64_t GetVersionNumber() const { return version_number_; }

  // REQUIRES: lock is held
  // On success, "tp" will contain the table properties of the file
  // specified in "file_meta". If the file name of "file_meta" is
  // known ahead, passing it by a non-null "fname" can save a
  // file-name conversion.
  Status GetTableProperties(std::shared_ptr<const TableProperties>* tp,
                            const FileMetaData* file_meta,
                            const std::string* fname = nullptr) const;

  // REQUIRES: lock is held
  // On success, *props will be populated with all SSTables' table properties.
  // The keys of `props` are the sst file name, the values of `props` are the
  // tables' properties, represented as shared_ptr.
  Status GetPropertiesOfAllTables(TablePropertiesCollection* props);
  Status GetPropertiesOfAllTables(TablePropertiesCollection* props, int level);
  Status GetPropertiesOfTablesInRange(const Range* range, std::size_t n,
                                      TablePropertiesCollection* props) const;

  // REQUIRES: lock is held
  // On success, "tp" will contain the aggregated table property among
  // the table properties of all sst files in this version.
  Status GetAggregatedTableProperties(
      std::shared_ptr<const TableProperties>* tp, int level = -1);

  uint64_t GetEstimatedActiveKeys() {
    return storage_info_.GetEstimatedActiveKeys();
  }

  size_t GetMemoryUsageByTableReaders();

  ColumnFamilyData* cfd() const { return cfd_; }

  // Return the next Version in the linked list. Used for debug only
  Version* TEST_Next() const {
    return next_;
  }

  int TEST_refs() const { return refs_; }

  VersionStorageInfo* storage_info() { return &storage_info_; }

  VersionSet* version_set() { return vset_; }

  void GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta);

  uint64_t GetSstFilesSize();

  // Returns a copy of the mutable CF options in effect when this Version was
  // created.
  MutableCFOptions GetMutableCFOptions() { return mutable_cf_options_; }

 private:
  Env* env_;
  friend class VersionSet;

  const InternalKeyComparator* internal_comparator() const {
    return storage_info_.internal_comparator_;
  }
  const Comparator* user_comparator() const {
    return storage_info_.user_comparator_;
  }

  bool PrefixMayMatch(const ReadOptions& read_options,
                      InternalIterator* level_iter,
                      const Slice& internal_prefix) const;

  // Returns true if the filter blocks in the specified level will not be
  // checked during read operations. In certain cases (trivial move or preload),
  // the filter block may already be cached, but we still do not access it such
  // that it eventually expires from the cache.
  bool IsFilterSkipped(int level, bool is_file_last_in_level = false);

  // The helper function of UpdateAccumulatedStats, which may fill the missing
  // fields of file_meta from its associated TableProperties.
  // Returns true if it does initialize FileMetaData.
  bool MaybeInitializeFileMetaData(FileMetaData* file_meta);

  // Update the accumulated stats associated with the current version.
  // This accumulated stats will be used in compaction.
  void UpdateAccumulatedStats(bool update_stats);

  // Sort all files for this version based on their file size and
  // record results in files_by_compaction_pri_. The largest files are listed
  // first.
  void UpdateFilesByCompactionPri();

  ColumnFamilyData* cfd_;  // ColumnFamilyData to which this Version belongs
  Logger* info_log_;
  Statistics* db_statistics_;
  TableCache* table_cache_;
  const MergeOperator* merge_operator_;

  VersionStorageInfo storage_info_;
  VersionSet* vset_;  // VersionSet to which this Version belongs
  Version* next_;     // Next version in linked list
  Version* prev_;     // Previous version in linked list
  int refs_;          // Number of live refs to this version
  const EnvOptions env_options_;
  const MutableCFOptions mutable_cf_options_;

  // A version number that uniquely represents this version. This is
  // used for debugging and logging purposes only.
  uint64_t version_number_;

  // Construction/destruction is private: only VersionSet creates and
  // destroys Versions (see friend declaration above).
  Version(ColumnFamilyData* cfd, VersionSet* vset, const EnvOptions& env_opt,
          MutableCFOptions mutable_cf_options, uint64_t version_number = 0);

  ~Version();

  // No copying allowed
  Version(const Version&);
  void operator=(const Version&);
};
703
704 struct ObsoleteFileInfo {
705 FileMetaData* metadata;
706 std::string path;
707
708 ObsoleteFileInfo() noexcept : metadata(nullptr) {}
709 ObsoleteFileInfo(FileMetaData* f, const std::string& file_path)
710 : metadata(f), path(file_path) {}
711
712 ObsoleteFileInfo(const ObsoleteFileInfo&) = delete;
713 ObsoleteFileInfo& operator=(const ObsoleteFileInfo&) = delete;
714
715 ObsoleteFileInfo(ObsoleteFileInfo&& rhs) noexcept :
716 ObsoleteFileInfo() {
717 *this = std::move(rhs);
718 }
719
720 ObsoleteFileInfo& operator=(ObsoleteFileInfo&& rhs) noexcept {
721 path = std::move(rhs.path);
722 metadata = rhs.metadata;
723 rhs.metadata = nullptr;
724
725 return *this;
726 }
727
728 void DeleteMetadata() {
729 delete metadata;
730 metadata = nullptr;
731 }
732 };
733
// NOTE(review): declaring a class inside an unnamed namespace in a header
// gives it internal linkage, so every translation unit that includes this
// header sees a *distinct* BaseReferencedVersionBuilder type — an ODR
// hazard. Later RocksDB releases moved this declaration out of the unnamed
// namespace; confirm against version_set.cc before changing it here.
namespace {
class BaseReferencedVersionBuilder;
}
737
738 class VersionSet {
739 public:
740 VersionSet(const std::string& dbname, const ImmutableDBOptions* db_options,
741 const EnvOptions& env_options, Cache* table_cache,
742 WriteBufferManager* write_buffer_manager,
743 WriteController* write_controller);
744 ~VersionSet();
745
  // Apply *edit to the current version to form a new descriptor that
  // is both saved to persistent state and installed as the new
  // current version. Will release *mu while actually writing to the file.
  // column_family_options has to be set if edit is column family add
  // REQUIRES: *mu is held on entry.
  // REQUIRES: no other thread concurrently calls LogAndApply()
  Status LogAndApply(
      ColumnFamilyData* column_family_data,
      const MutableCFOptions& mutable_cf_options, VersionEdit* edit,
      InstrumentedMutex* mu, Directory* db_directory = nullptr,
      bool new_descriptor_log = false,
      const ColumnFamilyOptions* column_family_options = nullptr) {
    // Single-edit convenience overload: wrap everything in singleton lists
    // and delegate to the multi-column-family batch overload.
    std::vector<ColumnFamilyData*> cfds(1, column_family_data);
    std::vector<MutableCFOptions> mutable_cf_options_list(1,
                                                          mutable_cf_options);
    std::vector<autovector<VersionEdit*>> edit_lists(1, {edit});
    return LogAndApply(cfds, mutable_cf_options_list, edit_lists, mu,
                       db_directory, new_descriptor_log, column_family_options);
  }
  // The batch version. If edit_list.size() > 1, caller must ensure that
  // no edit in the list is a column family add or drop.
  Status LogAndApply(
      ColumnFamilyData* column_family_data,
      const MutableCFOptions& mutable_cf_options,
      const autovector<VersionEdit*>& edit_list, InstrumentedMutex* mu,
      Directory* db_directory = nullptr, bool new_descriptor_log = false,
      const ColumnFamilyOptions* column_family_options = nullptr) {
    // Single-CF multi-edit overload: wrap the arguments in singleton lists
    // and delegate to the multi-column-family batch overload.
    std::vector<ColumnFamilyData*> cfds(1, column_family_data);
    std::vector<MutableCFOptions> mutable_cf_options_list(1,
                                                          mutable_cf_options);
    std::vector<autovector<VersionEdit*>> edit_lists(1, edit_list);
    return LogAndApply(cfds, mutable_cf_options_list, edit_lists, mu,
                       db_directory, new_descriptor_log, column_family_options);
  }
780
  // The across-multi-cf batch version. If edit_lists contain more than
  // 1 version edits, caller must ensure that no edit in the []list is column
  // family manipulation.
  Status LogAndApply(const std::vector<ColumnFamilyData*>& cfds,
                     const std::vector<MutableCFOptions>& mutable_cf_options,
                     const std::vector<autovector<VersionEdit*>>& edit_lists,
                     InstrumentedMutex* mu, Directory* db_directory = nullptr,
                     bool new_descriptor_log = false,
                     const ColumnFamilyOptions* new_cf_options = nullptr);

  // Recover the last saved descriptor from persistent storage.
  // If read_only == true, Recover() will not complain if some column families
  // are not opened
  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
                 bool read_only = false);

  // Reads a manifest file and returns a list of column families in
  // column_families.
  static Status ListColumnFamilies(std::vector<std::string>* column_families,
                                   const std::string& dbname, Env* env);
801
#ifndef ROCKSDB_LITE
  // Try to reduce the number of levels. This call is valid only when at
  // most one level among the new max level to the old max level contains
  // files.
  // The call is static, since the number of levels is immutable during
  // the lifetime of a RocksDB instance. It reduces the number of levels
  // in a DB by applying changes to the manifest.
  // For example, a db currently has 7 levels [0-6], and a call
  // to reduce to 5 [0-4] can only be executed when only one level
  // among [4-6] contains files.
  static Status ReduceNumberOfLevels(const std::string& dbname,
                                     const Options* options,
                                     const EnvOptions& env_options,
                                     int new_levels);

  // printf contents of the manifest to stdout (for debugging).
  Status DumpManifest(Options& options, std::string& manifestFileName,
                      bool verbose, bool hex = false, bool json = false);

#endif  // ROCKSDB_LITE
822
  // Return the current manifest file number
  uint64_t manifest_file_number() const { return manifest_file_number_; }

  // Return the file number of the currently persisted OPTIONS file.
  uint64_t options_file_number() const { return options_file_number_; }

  // Return the manifest file number that is in the process of being written
  // (see pending_manifest_file_number_).
  uint64_t pending_manifest_file_number() const {
    return pending_manifest_file_number_;
  }

  // Return the next file number without allocating it.
  uint64_t current_next_file_number() const { return next_file_number_.load(); }

  uint64_t min_log_number_to_keep_2pc() const {
    return min_log_number_to_keep_2pc_.load();
  }

  // Allocate and return a new file number
  uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }

  // Fetch-and-add: atomically allocate `n` consecutive new file numbers and
  // return the first of them.
  uint64_t FetchAddFileNumber(uint64_t n) {
    return next_file_number_.fetch_add(n);
  }
845
  // Return the last sequence number visible to reads.
  uint64_t LastSequence() const {
    return last_sequence_.load(std::memory_order_acquire);
  }

  // Return the last allocated sequence number (see last_allocated_sequence_).
  // Note: memory_order_acquire must be sufficient; seq_cst is used here
  // conservatively.
  uint64_t LastAllocatedSequence() const {
    return last_allocated_sequence_.load(std::memory_order_seq_cst);
  }

  // Return the last sequence number published to readers (see
  // last_published_sequence_).
  // Note: memory_order_acquire must be sufficient; seq_cst is used here
  // conservatively.
  uint64_t LastPublishedSequence() const {
    return last_published_sequence_.load(std::memory_order_seq_cst);
  }

  // Set the last sequence number to s. Must be monotonically non-decreasing.
  void SetLastSequence(uint64_t s) {
    assert(s >= last_sequence_);
    // Last visible sequence must always be less than last written seq
    assert(!db_options_->two_write_queues || s <= last_allocated_sequence_);
    last_sequence_.store(s, std::memory_order_release);
  }

  // Set the last published sequence number to s (monotonically
  // non-decreasing).
  // Note: memory_order_release must be sufficient; seq_cst is used here
  // conservatively.
  void SetLastPublishedSequence(uint64_t s) {
    assert(s >= last_published_sequence_);
    last_published_sequence_.store(s, std::memory_order_seq_cst);
  }

  // Set the last allocated sequence number to s (monotonically
  // non-decreasing).
  // Note: memory_order_release must be sufficient; seq_cst is used here
  // conservatively.
  void SetLastAllocatedSequence(uint64_t s) {
    assert(s >= last_allocated_sequence_);
    last_allocated_sequence_.store(s, std::memory_order_seq_cst);
  }

  // Atomically allocate `s` sequence numbers; returns the value of
  // last_allocated_sequence_ before the addition.
  // Note: memory_order_release must be sufficient; seq_cst is used here
  // conservatively.
  uint64_t FetchAddLastAllocatedSequence(uint64_t s) {
    return last_allocated_sequence_.fetch_add(s, std::memory_order_seq_cst);
  }
885
  // Mark the specified file number as used.
  // REQUIRED: this is only called during single-threaded recovery or repair.
  void MarkFileNumberUsed(uint64_t number);

  // Mark the specified log number as deleted (advances
  // min_log_number_to_keep_2pc_).
  // REQUIRED: this is only called during single-threaded recovery or repair, or
  // from ::LogAndApply where the global mutex is held.
  void MarkMinLogNumberToKeep2PC(uint64_t number);

  // Return the log file number for the log file that is currently
  // being compacted, or zero if there is no such log file.
  uint64_t prev_log_number() const { return prev_log_number_; }
898
  // Returns the minimum log number which still has data not flushed to any SST
  // file.
  // In non-2PC mode, all the log numbers smaller than this number can be safely
  // deleted.
  // Delegates with no column family skipped.
  uint64_t MinLogNumberWithUnflushedData() const {
    return PreComputeMinLogNumberWithUnflushedData(nullptr);
  }
906 // Returns the minimum log number which still has data not flushed to any SST
907 // file, except data from `cfd_to_skip`.
908 uint64_t PreComputeMinLogNumberWithUnflushedData(
909 const ColumnFamilyData* cfd_to_skip) const {
910 uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
911 for (auto cfd : *column_family_set_) {
912 if (cfd == cfd_to_skip) {
913 continue;
914 }
915 // It's safe to ignore dropped column families here:
916 // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
917 if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
918 min_log_num = cfd->GetLogNumber();
919 }
920 }
921 return min_log_num;
922 }
923
  // Create an iterator that reads over the compaction inputs for "*c".
  // The caller should delete the iterator when no longer needed.
  InternalIterator* MakeInputIterator(
      const Compaction* c, RangeDelAggregator* range_del_agg,
      const EnvOptions& env_options_compactions);

  // Add all files listed in any live version to *live_list.
  void AddLiveFiles(std::vector<FileDescriptor>* live_list);

  // Return the approximate size of data to be scanned for range [start, end)
  // in levels [start_level, end_level). If end_level == -1 (the default) it
  // will search through all non-empty levels.
  uint64_t ApproximateSize(Version* v, const Slice& start, const Slice& end,
                           int start_level = 0, int end_level = -1);

  // Return the size of the current manifest file
  uint64_t manifest_file_size() const { return manifest_file_size_; }

  // verify that the files that we started with for a compaction
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
  bool VerifyCompactionFileConsistency(Compaction* c);

  // Look up the file with the given number across all live versions;
  // on success fills in its level, metadata, and owning column family.
  Status GetMetadataForFile(uint64_t number, int* filelevel,
                            FileMetaData** metadata, ColumnFamilyData** cfd);

  // This function doesn't support leveldb SST filenames
  void GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata);

  // Collect obsolete SST files and manifest filenames; only files with
  // numbers below `min_pending_output` are returned (see obsolete_files_).
  void GetObsoleteFiles(std::vector<ObsoleteFileInfo>* files,
                        std::vector<std::string>* manifest_filenames,
                        uint64_t min_pending_output);
957
  // Accessor for the set of column families owned by this VersionSet.
  ColumnFamilySet* GetColumnFamilySet() { return column_family_set_.get(); }
  // Env options used for all reads and writes except compactions.
  const EnvOptions& env_options() { return env_options_; }
  // Propagate a mutable DB option change into env_options_.
  void ChangeEnvOptions(const MutableDBOptions& new_options) {
    env_options_.writable_file_max_buffer_size =
        new_options.writable_file_max_buffer_size;
  }

  const ImmutableDBOptions* db_options() const { return db_options_; }

  // Number of live versions reachable from `dummy_versions`.
  static uint64_t GetNumLiveVersions(Version* dummy_versions);

  // Total size of SST files referenced by versions reachable from
  // `dummy_versions`.
  static uint64_t GetTotalSstFilesSize(Version* dummy_versions);
970
 private:
  // Per-caller state queued in manifest_writers_; defined in version_set.cc.
  struct ManifestWriter;

  friend class Version;
  friend class DBImpl;
977 struct LogReporter : public log::Reader::Reporter {
978 Status* status;
979 virtual void Corruption(size_t /*bytes*/, const Status& s) override {
980 if (this->status->ok()) *this->status = s;
981 }
982 };
983
  // ApproximateSize helper for level-0 files (which may overlap each other).
  uint64_t ApproximateSizeLevel0(Version* v, const LevelFilesBrief& files_brief,
                                 const Slice& start, const Slice& end);

  // ApproximateSize helper: approximate offset of `key` within file `f`.
  uint64_t ApproximateSize(Version* v, const FdWithKeyRange& f,
                           const Slice& key);

  // Save current contents to *log
  Status WriteSnapshot(log::Writer* log);

  // Install version `v` as the new current version of the column family.
  void AppendVersion(ColumnFamilyData* column_family_data, Version* v);

  // Create a new column family described by `cf_options` and `edit`.
  ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& cf_options,
                                       VersionEdit* edit);

  // Recovery helper: apply one version edit read from the MANIFEST,
  // updating the per-CF builders and the have_*/value output pairs that
  // accumulate recovered state. See Recover() in version_set.cc.
  Status ApplyOneVersionEdit(
      VersionEdit& edit,
      const std::unordered_map<std::string, ColumnFamilyOptions>& name_to_opts,
      std::unordered_map<int, std::string>& column_families_not_found,
      std::unordered_map<uint32_t, BaseReferencedVersionBuilder*>& builders,
      bool* have_log_number, uint64_t* log_number, bool* have_prev_log_number,
      uint64_t* previous_log_number, bool* have_next_file, uint64_t* next_file,
      bool* have_last_sequence, SequenceNumber* last_sequence,
      uint64_t* min_log_number_to_keep, uint32_t* max_column_family);

  // Write the queued manifest writers out to the descriptor log.
  Status ProcessManifestWrites(std::deque<ManifestWriter>& writers,
                               InstrumentedMutex* mu, Directory* db_directory,
                               bool new_descriptor_log,
                               const ColumnFamilyOptions* new_cf_options);
1013
  std::unique_ptr<ColumnFamilySet> column_family_set_;

  Env* const env_;
  const std::string dbname_;
  const ImmutableDBOptions* const db_options_;
  std::atomic<uint64_t> next_file_number_;
  // Any log number equal or lower than this should be ignored during recovery,
  // and is qualified for being deleted in 2PC mode. In non-2PC mode, this
  // number is ignored.
  std::atomic<uint64_t> min_log_number_to_keep_2pc_ = {0};
  uint64_t manifest_file_number_;
  uint64_t options_file_number_;
  uint64_t pending_manifest_file_number_;
  // The last seq visible to reads. It normally indicates the last sequence in
  // the memtable but when using two write queues it could also indicate the
  // last sequence in the WAL visible to reads.
  std::atomic<uint64_t> last_sequence_;
  // The last seq that is already allocated. It is applicable only when we have
  // two write queues. In that case seq might or might not have appeared in
  // memtable but it is expected to appear in the WAL.
  // We have last_sequence <= last_allocated_sequence_
  std::atomic<uint64_t> last_allocated_sequence_;
  // The last allocated sequence that is also published to the readers. This is
  // applicable only when last_seq_same_as_publish_seq_ is not set. Otherwise
  // last_sequence_ also indicates the last published seq.
  // We have last_sequence <= last_published_sequence_ <=
  // last_allocated_sequence_
  std::atomic<uint64_t> last_published_sequence_;
  uint64_t prev_log_number_;  // 0 or backing store for memtable being compacted

  // Opened lazily
  unique_ptr<log::Writer> descriptor_log_;

  // generates an increasing version number for every new version
  uint64_t current_version_number_;

  // Queue of writers to the manifest file
  std::deque<ManifestWriter*> manifest_writers_;

  // Current size of manifest file
  uint64_t manifest_file_size_;

  // Files made obsolete by compactions/flushes, awaiting deletion.
  std::vector<ObsoleteFileInfo> obsolete_files_;
  std::vector<std::string> obsolete_manifests_;

  // env options for all reads and writes except compactions
  EnvOptions env_options_;
1061
1062 // No copying allowed
1063 VersionSet(const VersionSet&);
1064 void operator=(const VersionSet&);
1065
1066 void LogAndApplyCFHelper(VersionEdit* edit);
1067 void LogAndApplyHelper(ColumnFamilyData* cfd, VersionBuilder* b, Version* v,
1068 VersionEdit* edit, InstrumentedMutex* mu);
1069 };
1070
1071 } // namespace rocksdb