// File: cache/cache_entry_stats.h (RocksDB, as vendored in ceph.git, reef 18.1.2)
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <array>
#include <cstdint>
#include <memory>
#include <mutex>

#include "cache/cache_helpers.h"
#include "cache/cache_key.h"
#include "port/lang.h"
#include "rocksdb/cache.h"
#include "rocksdb/status.h"
#include "rocksdb/system_clock.h"
#include "test_util/sync_point.h"
#include "util/coding_lean.h"

namespace ROCKSDB_NAMESPACE {

// A generic helper object for gathering stats about cache entries by
// iterating over them with ApplyToAllEntries. This class essentially
// solves the problem of slowing down a Cache with too many stats
// collectors that could be sharing stat results, such as from multiple
// column families or multiple DBs sharing a Cache. We employ a few
// mitigations:
// * Only one collector for a particular kind of Stats is alive
// for each Cache. This is guaranteed using the Cache itself to hold
// the collector.
// * A mutex ensures only one thread is gathering stats for this
// collector.
// * The most recent gathered stats are saved and simply copied to
// satisfy requests within a time window (default: 3 minutes) of
// completion of the most recent stat gathering.
//
// Template parameter Stats must be copyable and trivially constructible,
// as well as...
// concept Stats {
//   // Notification before applying callback to all entries
//   void BeginCollection(Cache*, SystemClock*, uint64_t start_time_micros);
//   // Get the callback to apply to all entries. `callback`
//   // type must be compatible with Cache::ApplyToAllEntries
//   callback GetEntryCallback();
//   // Notification after applying callback to all entries
//   void EndCollection(Cache*, SystemClock*, uint64_t end_time_micros);
//   // Notification that a collection was skipped because of
//   // sufficiently recent saved results.
//   void SkippedCollection();
// }
template <class Stats>
class CacheEntryStatsCollector {
 public:
  // Gather and save stats if saved stats are too old. (Use GetStats() to
  // read saved stats.)
  //
  // Maximum allowed age for a "hit" on saved results is determined by the
  // two interval parameters. Both set to 0 forces a re-scan. For example
  // with min_interval_seconds=300 and min_interval_factor=100, if the last
  // scan took 10s, we would only rescan ("miss") if the age in seconds of
  // the saved results is > max(300, 100*10).
  // Justification: scans can vary wildly in duration, e.g. from 0.02 sec
  // to as much as 20 seconds, so we want to be able to cap the absolute
  // and relative frequency of scans.
  void CollectStats(int min_interval_seconds, int min_interval_factor) {
    // Waits for any pending reader or writer (collector)
    std::lock_guard<std::mutex> lock(working_mutex_);

    // Absolute freshness cap; negative min_interval_seconds is clamped to 0
    // (i.e. always considered stale).
    uint64_t max_age_micros =
        static_cast<uint64_t>(std::max(min_interval_seconds, 0)) * 1000000U;

    // Relative freshness cap, scaled by the duration of the last completed
    // scan. Only applies once a scan has actually completed (end > start).
    if (last_end_time_micros_ > last_start_time_micros_ &&
        min_interval_factor > 0) {
      max_age_micros = std::max(
          max_age_micros, min_interval_factor * (last_end_time_micros_ -
                                                 last_start_time_micros_));
    }

    uint64_t start_time_micros = clock_->NowMicros();
    if ((start_time_micros - last_end_time_micros_) > max_age_micros) {
      // Saved results are too old: do a full re-scan of the cache.
      last_start_time_micros_ = start_time_micros;
      working_stats_.BeginCollection(cache_, clock_, start_time_micros);

      cache_->ApplyToAllEntries(working_stats_.GetEntryCallback(), {});
      TEST_SYNC_POINT_CALLBACK(
          "CacheEntryStatsCollector::GetStats:AfterApplyToAllEntries", nullptr);

      uint64_t end_time_micros = clock_->NowMicros();
      last_end_time_micros_ = end_time_micros;
      working_stats_.EndCollection(cache_, clock_, end_time_micros);
    } else {
      // Saved results are recent enough; notify Stats that we skipped.
      working_stats_.SkippedCollection();
    }

    // Save so that we don't need to wait for an outstanding collection in
    // order to make of copy of the last saved stats
    // (Lock order is always working_mutex_ then saved_mutex_; GetStats()
    // takes only saved_mutex_, so no deadlock is possible.)
    std::lock_guard<std::mutex> lock2(saved_mutex_);
    saved_stats_ = working_stats_;
  }

  // Gets saved stats, regardless of age
  void GetStats(Stats *stats) {
    std::lock_guard<std::mutex> lock(saved_mutex_);
    *stats = saved_stats_;
  }

  // The Cache this collector is attached to (non-owning).
  Cache *GetCache() const { return cache_; }

  // Gets or creates a shared instance of CacheEntryStatsCollector in the
  // cache itself, and saves into `ptr`. This shared_ptr will hold the
  // entry in cache until all refs are destroyed.
  static Status GetShared(Cache *cache, SystemClock *clock,
                          std::shared_ptr<CacheEntryStatsCollector> *ptr) {
    const Slice &cache_key = GetCacheKey();

    Cache::Handle *h = cache->Lookup(cache_key);
    if (h == nullptr) {
      // Not yet in cache, but Cache doesn't provide a built-in way to
      // avoid racing insert. So we double-check under a shared mutex,
      // inspired by TableCache.
      STATIC_AVOID_DESTRUCTION(std::mutex, static_mutex);
      std::lock_guard<std::mutex> lock(static_mutex);

      // Second lookup under the mutex: another thread may have inserted
      // between our first Lookup and acquiring the lock.
      h = cache->Lookup(cache_key);
      if (h == nullptr) {
        auto new_ptr = new CacheEntryStatsCollector(cache, clock);
        // TODO: non-zero charge causes some tests that count block cache
        // usage to go flaky. Fix the problem somehow so we can use an
        // accurate charge.
        size_t charge = 0;
        Status s = cache->Insert(cache_key, new_ptr, charge, Deleter, &h,
                                 Cache::Priority::HIGH);
        if (!s.ok()) {
          assert(h == nullptr);
          // Insert failed, so the cache never took ownership; free here to
          // avoid a leak.
          delete new_ptr;
          return s;
        }
      }
    }
    // If we reach here, shared entry is in cache with handle `h`.
    assert(cache->GetDeleter(h) == Deleter);

    // Build an aliasing shared_ptr that keeps `ptr` in cache while there
    // are references.
    *ptr = MakeSharedCacheHandleGuard<CacheEntryStatsCollector>(cache, h);
    return Status::OK();
  }

 private:
  // Only constructed via GetShared() (above); `cache` and `clock` are
  // borrowed, not owned, and must outlive this collector.
  explicit CacheEntryStatsCollector(Cache *cache, SystemClock *clock)
      : saved_stats_(),
        working_stats_(),
        last_start_time_micros_(0),
        // Non-zero initial "end time" so that a fresh collector does not
        // trivially treat its (empty) saved stats as just-collected.
        last_end_time_micros_(/*pessimistic*/ 10000000),
        cache_(cache),
        clock_(clock) {}

  // Deleter registered with the cache entry; reclaims the collector when
  // the entry is evicted or the cache is destroyed.
  static void Deleter(const Slice &, void *value) {
    delete static_cast<CacheEntryStatsCollector *>(value);
  }

  // Returns a process-lifetime-unique cache key. Because these are
  // function-local statics of a class template, each template
  // instantiation (each Stats type) gets its own distinct key.
  static const Slice &GetCacheKey() {
    // For each template instantiation
    static CacheKey ckey = CacheKey::CreateUniqueForProcessLifetime();
    static Slice ckey_slice = ckey.AsSlice();
    return ckey_slice;
  }

  std::mutex saved_mutex_;  // guards saved_stats_
  Stats saved_stats_;       // last completed results, copied out by GetStats()

  std::mutex working_mutex_;  // guards working_stats_ and the timestamps below
  Stats working_stats_;       // in-progress / scratch stats
  uint64_t last_start_time_micros_;  // start of most recent full scan
  uint64_t last_end_time_micros_;    // end of most recent full scan

  Cache *const cache_;        // not owned
  SystemClock *const clock_;  // not owned
};

}  // namespace ROCKSDB_NAMESPACE